Index: metastore/src/model/package.jdo =================================================================== --- metastore/src/model/package.jdo (revision 1235046) +++ metastore/src/model/package.jdo (working copy) @@ -214,8 +214,76 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -274,6 +342,11 @@ + + + + + @@ -701,5 +774,6 @@ + Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (revision 1235046) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (working copy) @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; +import java.util.Set; public class MStorageDescriptor { private MColumnDescriptor cd; @@ -32,6 +33,7 @@ private List bucketCols; private List sortCols; private Map parameters; + private Set clusterStorage; public MStorageDescriptor() {} @@ -63,7 +65,37 @@ this.parameters = parameters; } + /** + * @param cd + * @param location + * @param inputFormat + * @param outputFormat + * @param isCompressed + * @param numBuckets + * @param serDeInfo + * @param bucketCols + * @param sortOrder + * @param parameters + * @param clusterStorage + */ + public MStorageDescriptor(MColumnDescriptor cd, String location, String inputFormat, + String outputFormat, boolean isCompressed, int numBuckets, MSerDeInfo serDeInfo, + List bucketCols, List sortOrder, Map parameters, + Set clusterStorage) { + this.cd = cd; + this.location = location; + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.isCompressed = isCompressed; + this.numBuckets = numBuckets; + this.serDeInfo = serDeInfo; + this.bucketCols = bucketCols; + this.sortCols = sortOrder; + this.parameters = parameters; + this.clusterStorage = clusterStorage; + } + /** * 
@return the location */ @@ -205,4 +237,21 @@ public List getSortCols() { return sortCols; } + + + /** + * @return list of cluster storage descriptor + */ + public Set getClusterStorage() { + return clusterStorage; + } + + + /** + * @param clusterStorage list of cluster storages to set + */ + public void setClusterStorage( + Set clusterStorage) { + this.clusterStorage = clusterStorage; + } } Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MClusterStorageDescriptor.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MClusterStorageDescriptor.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MClusterStorageDescriptor.java (revision 0) @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +import java.io.Serializable; +import java.util.Map; + +public class MClusterStorageDescriptor implements Serializable { + + private MCluster cluster; + private String location; + private boolean isPrimary; + private boolean dataSynced; + private Map parameters; + + /** + * @return cluster object + */ + public MCluster getCluster() { + return cluster; + } + + /** + * @param cluster + */ + public void setCluster(MCluster cluster) { + this.cluster = cluster; + } + + /** + * @return data location stored in this cluster + */ + public String getLocation() { + return location; + } + + /** + * @param location location in this cluster + */ + public void setLocation(String location) { + this.location = location; + } + + /** + * @return parameters + */ + public Map getParameters() { + return parameters; + } + + /** + * @param parameters + */ + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + /** + * @return isPrimary + */ + public boolean getIsPrimary() { + return isPrimary; + } + + /** + * @param isPrimary + */ + public void setIsPrimary(boolean isPrimary) { + this.isPrimary = isPrimary; + } + + /** + * @return is the replica's data up to date. + */ + public boolean getDataSynced() { + return dataSynced; + } + + /** + * set this replica's data to be up to date. + * @param dataSynced + */ + public void setDataSynced(boolean dataSynced) { + this.dataSynced = dataSynced; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MCluster.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MCluster.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MCluster.java (revision 0) @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +import java.io.Serializable; +import java.util.Map; + +public class MCluster implements Serializable { + + private String clusterName; + private String description; + private String location; + private int createTime; + private Map parameters; + + public MCluster() { + } + + public MCluster(String clusterName, String description, + String location, int createTime, Map parameters) { + super(); + this.clusterName = clusterName; + this.description = description; + this.location = location; + this.createTime = createTime; + this.parameters = parameters; + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Map getParameters() { + return parameters; + } + + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + public int getCreateTime() { + return createTime; + } + + public void setCreateTime(int 
createTime) { + this.createTime = createTime; + } +} Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1235046) +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy) @@ -22,9 +22,11 @@ import java.util.Map; import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hive.metastore.api.Cluster; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -189,6 +191,20 @@ public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + public abstract Boolean addCluster(Cluster cluster) + throws InvalidObjectException, MetaException; + + public abstract Cluster getCluster(String name) throws MetaException; + + public abstract List listClusters() throws MetaException; + + public abstract boolean dropCluster(String clusterName) + throws MetaException, InvalidOperationException, + NoSuchObjectException; + + public abstract void alterCluster(String clusterName, + Cluster cluster) throws MetaException; + public abstract boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; @@ -305,4 +321,5 @@ throws MetaException, InvalidObjectException, NoSuchObjectException; public abstract long cleanupEvents(); + } Index: 
metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1235046) +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy) @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Cluster; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -184,6 +185,24 @@ public void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException; + /** + * Drop table on one cluster. + * + * @param name + * @param dbName + * @param tableName + * @param deleteData + * @param ignoreUnknownTab + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @throws InvalidOperationException + */ + public void dropTableOnCluster(String name, String dbName, + String tableName, boolean deleteData, boolean ignoreUnknownTab) + throws NoSuchObjectException, MetaException, TException, + InvalidOperationException; + public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException; @@ -536,7 +555,23 @@ public boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; + /** + * Drop partition on one cluster. 
+ * + * @param clusterName + * @param dbName + * @param tblName + * @param partVals + * @param deleteData + * @return + */ + public boolean dropPartitionWithClusterName(String clusterName, + String dbName, String tblName, List partVals, + boolean deleteData) throws NoSuchObjectException, + MetaException, TException, InvalidOperationException; + + /** * updates a partition to new partition * * @param dbName @@ -846,5 +881,56 @@ */ public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException; + /** + * @param cluster + * @throws MetaException + * @throws TException + * @throws AlreadyExistsException + * @throws InvalidObjectException + */ + public void createCluster(Cluster cluster) + throws AlreadyExistsException, MetaException, TException, InvalidObjectException; + /** + * @param name + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @throws InvalidOperationException + */ + public void dropCluster(String name) throws NoSuchObjectException, + MetaException, TException, InvalidOperationException; + + /** + * @param clusterName + * @return + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + public Cluster getCluster(String clusterName) + throws NoSuchObjectException, MetaException, TException; + + /** + * alter the cluster object + * + * @param name + * @param cluster + * @return + * @throws NoSuchObjectException + * @throws AlreadyExistsException + */ + public void alter_cluster(String name, Cluster cluster) + throws MetaException, TException, InvalidObjectException, + NoSuchObjectException, AlreadyExistsException; + + /** + * return all clusters. 
+ * + * @return + * @throws MetaException + * @throws TException + */ + public List listClusters() throws MetaException, TException; + } Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1235046) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy) @@ -40,6 +40,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Cluster; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -1217,4 +1218,53 @@ return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); } + @Override + public void createCluster(Cluster cluster) + throws AlreadyExistsException, MetaException, TException, InvalidObjectException { + client.create_cluster(cluster); + } + + @Override + public void dropCluster(String name) throws NoSuchObjectException, + MetaException, TException, InvalidOperationException { + client.drop_cluster(name); + } + + @Override + public Cluster getCluster(String clusterName) + throws NoSuchObjectException, MetaException, TException { + assert clusterName != null; + return client.get_cluster(clusterName); + } + + @Override + public List listClusters() throws MetaException, TException { + return client.list_clusters(); + } + + @Override + public void alter_cluster(String name, Cluster cluster) + throws MetaException, TException, InvalidObjectException, + NoSuchObjectException, AlreadyExistsException { + client.alter_cluster(name, cluster); + } + + @Override + public boolean dropPartitionWithClusterName(String clusterName, + String dbName, 
String tblName, List partVals, + boolean deleteData) throws NoSuchObjectException, MetaException, TException, + InvalidOperationException { + return client.drop_partition_on_cluster(clusterName, dbName, tblName, + partVals, deleteData); + } + + @Override + public void dropTableOnCluster(String clusterName, String dbName, + String tableName, boolean deleteData, boolean ignoreUnknownTab) + throws NoSuchObjectException, MetaException, TException, + InvalidOperationException { + client.drop_table_on_cluster(clusterName, dbName, tableName, + deleteData); + } + } Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1235046) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy) @@ -55,6 +55,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Cluster; +import org.apache.hadoop.hive.metastore.api.ClusterStorageDescriptor; import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; import org.apache.hadoop.hive.metastore.api.Constants; import org.apache.hadoop.hive.metastore.api.Database; @@ -253,6 +255,8 @@ alterHandlerName, AlterHandler.class), hiveConf); wh = new Warehouse(hiveConf); + DEFAULT_CLUSTER_NAME = hiveConf.get("hive.metastore.cluster.default", ""); + retryInterval = HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.METASTOREINTERVAL); retryLimit = HiveConf.getIntVar(hiveConf, @@ -616,6 +620,7 @@ } private static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + private String DEFAULT_CLUSTER_NAME = ""; private Path getDefaultDatabasePath(String dbName) throws MetaException { if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { @@ -1100,9 +1105,11 @@ return (ms.getTable(dbname, 
name) != null); } - private void drop_table_core(final RawStore ms, final String dbname, - final String name, final boolean deleteData) - throws NoSuchObjectException, MetaException, IOException { + private void drop_table_core(final RawStore ms, + final String dbname, final String name, + final String clusterName, final boolean deleteData) + throws NoSuchObjectException, MetaException, IOException, + InvalidOperationException { boolean success = false; boolean isExternal = false; @@ -1127,6 +1134,49 @@ "The table " + name + " is an index table. Please do drop index instead."); } + isExternal = isExternal(tbl); + + List clusterSDS = tbl.getSd().getClusterStorage(); + boolean dropOnPrimary = false; + ClusterStorageDescriptor toRemoveClusterSD = null; + if(clusterSDS != null && !clusterSDS.isEmpty()) { + //check is any secondary cluster exists + //if has replicas, drop on primary is not allowed + boolean hasMoreReplica = clusterSDS.size() > 1; + for(ClusterStorageDescriptor clusterSD: clusterSDS) { + if (clusterName.equalsIgnoreCase( + clusterSD.getCluster().getName())) { + if(clusterSD.isPrimary()) { + dropOnPrimary = true; + } + toRemoveClusterSD = clusterSD; + tblPath = new Path(clusterSD.getLocation()); + break; + } + } + + if(tblPath == null) { + throw new InvalidOperationException("Table is not found on cluster " + clusterName); + } + + if(hasMoreReplica && dropOnPrimary) { + throw new InvalidOperationException( + "Can not drop on primary cluster while there are more than one replica."); + } + } else { + //there is no cluster support for this table. 
ignore the cluster name + if(tbl.getSd().getLocation() != null && !tbl.getSd().getLocation().trim().equals("")) { + tblPath = new Path(tbl.getSd().getLocation()); + } + dropOnPrimary = true; + } + + if (!wh.isWritable(tblPath.getParent())) { + throw new MetaException("Table metadata not deleted since " + + tblPath.getParent() + " is not writable by " + + hiveConf.getUser()); + } + if (!isIndexTable) { try { List indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE); @@ -1140,19 +1190,19 @@ throw new MetaException(e.getMessage()); } } - isExternal = isExternal(tbl); - if (tbl.getSd().getLocation() != null) { - tblPath = new Path(tbl.getSd().getLocation()); - if (!wh.isWritable(tblPath.getParent())) { - throw new MetaException("Table metadata not deleted since " + - tblPath.getParent() + " is not writable by " + - hiveConf.getUser()); + + if(dropOnPrimary) { + if (!ms.dropTable(dbname, name)) { + throw new MetaException("Unable to drop table"); } + } else { + tbl.getSd().getClusterStorage().remove(toRemoveClusterSD); + try { + ms.alterTable(dbname, name, tbl); + } catch (InvalidObjectException e) { + throw new InvalidOperationException(e.getMessage()); + } } - - if (!ms.dropTable(dbname, name)) { - throw new MetaException("Unable to drop table"); - } success = ms.commitTransaction(); } finally { if (!success) { @@ -1176,7 +1226,8 @@ success = executeWithRetry(new Command() { @Override public Boolean run(RawStore ms) throws Exception { - drop_table_core(ms, dbname, name, deleteData); + drop_table_core(ms, dbname, name, DEFAULT_CLUSTER_NAME, + deleteData); return Boolean.TRUE; } }); @@ -1184,6 +1235,8 @@ throw e; } catch (MetaException e) { throw e; + } catch (InvalidOperationException e) { + throw new RuntimeException(e); } catch (Exception e) { assert(e instanceof RuntimeException); throw (RuntimeException)e; @@ -1193,6 +1246,38 @@ } + public void drop_table_on_cluster(final String cluster_name, + final String dbname, final String name, + final boolean deleteData) 
throws NoSuchObjectException, + MetaException, InvalidOperationException { + startTableFunction("drop_table_on_cluster", dbname, name); + + boolean success = false; + try { + success = executeWithRetry(new Command() { + @Override + public Boolean run(RawStore ms) throws Exception { + drop_table_core(ms, dbname, name, cluster_name, + deleteData); + return Boolean.TRUE; + } + }); + } catch (NoSuchObjectException e) { + throw e; + } catch (MetaException e) { + throw e; + } catch (InvalidOperationException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } finally { + endFunction("drop_table", success); + } + + } + + /** * Is this an external table? * @@ -1742,6 +1827,13 @@ } + public boolean drop_partition_on_cluster(String cluster_name, + String db_name, String tbl_name, List part_vals, + boolean deleteData) throws NoSuchObjectException, + MetaException, InvalidOperationException{ + return true; + } + public Partition get_partition(final String db_name, final String tbl_name, final List part_vals) throws MetaException, NoSuchObjectException { startPartitionFunction("get_partition", db_name, tbl_name, part_vals); @@ -2519,8 +2611,8 @@ ret = executeWithRetry(new Command() { @Override public Boolean run(RawStore ms) throws Exception { - return drop_index_by_name_core(ms, dbName, tblName, - indexName, deleteData); + return drop_index_by_name_core(ms, DEFAULT_CLUSTER_NAME, + dbName, tblName, indexName, deleteData); } }); } catch (NoSuchObjectException e) { @@ -2539,9 +2631,44 @@ return ret; } + public boolean drop_index_by_name_on_cluster( + final String clusterName, final String dbName, + final String tblName, final String indexName, + final boolean deleteData) throws NoSuchObjectException, + MetaException, TException { + startFunction("drop_index_by_name_on_cluster", ": cluster=" + + clusterName + " db=" + dbName + " tbl=" + tblName + + " index=" + indexName); + + boolean ret = false; + try { + ret = 
executeWithRetry(new Command() { + @Override + public Boolean run(RawStore ms) throws Exception { + return drop_index_by_name_core(ms, clusterName, dbName, + tblName, indexName, deleteData); + } + }); + } catch (NoSuchObjectException e) { + throw e; + } catch (MetaException e) { + throw e; + } catch (TException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } finally { + endFunction("drop_index_by_name_on_cluster", ret); + } + + return ret; + } + private boolean drop_index_by_name_core(final RawStore ms, - final String dbName, final String tblName, - final String indexName, final boolean deleteData) throws NoSuchObjectException, + final String clusterName, final String dbName, + final String tblName, final String indexName, + final boolean deleteData) throws NoSuchObjectException, MetaException, TException, IOException { boolean success = false; @@ -2554,28 +2681,71 @@ if (index == null) { throw new NoSuchObjectException(indexName + " doesn't exist"); } - ms.dropIndex(dbName, tblName, indexName); String idxTblName = index.getIndexTableName(); + Table tbl = null; + boolean dropOnPrimary = false; + ClusterStorageDescriptor toRemoveClusterSD = null; + if (idxTblName != null) { - Table tbl = null; - tbl = this.get_table(dbName, idxTblName); + //check the underlying index table + tbl = get_table(dbName, idxTblName); if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } - if (tbl.getSd().getLocation() != null) { + List clusterSDS = tbl.getSd().getClusterStorage(); + if(clusterSDS != null && !clusterSDS.isEmpty()) { + // check is any secondary cluster exists + // if has replicas, drop on primary is not allowed + boolean hasMoreReplica = clusterSDS.size() > 1; + for(ClusterStorageDescriptor clusterSD: clusterSDS) { + if (clusterName.equalsIgnoreCase( + clusterSD.getCluster().getName())) { + if(clusterSD.isPrimary()) { + dropOnPrimary = true; + } + toRemoveClusterSD = 
clusterSD; + tblPath = new Path(clusterSD.getLocation()); + break; + } + } + + if (hasMoreReplica && dropOnPrimary) { + throw new MetaException( + "Can not drop a underlying index table on primary cluster while there are more than one replica. " + + "Run drop index on non-primary cluster first."); + } + } else { + //there is no cluster support for this table. ignore the cluster name tblPath = new Path(tbl.getSd().getLocation()); - if (!wh.isWritable(tblPath.getParent())) { - throw new MetaException("Index table metadata not deleted since " + - tblPath.getParent() + " is not writable by " + - hiveConf.getUser()); + dropOnPrimary = true; + } + } + + if (tblPath != null && !wh.isWritable(tblPath.getParent())) { + throw new MetaException("Table metadata not deleted since " + + tblPath.getParent() + " is not writable by " + + hiveConf.getUser()); + } + + if (dropOnPrimary) { + ms.dropIndex(dbName, tblName, indexName); + } + + if(tbl != null) { + if(dropOnPrimary) { + ms.dropTable(dbName, tblName); + } else { + if (toRemoveClusterSD != null) { + tbl.getSd().getClusterStorage().remove(toRemoveClusterSD); + try { + ms.alterTable(dbName, tblName, tbl); + } catch (InvalidObjectException e) { + throw new MetaException(e.getMessage()); + } } } - if (!ms.dropTable(dbName, idxTblName)) { - throw new MetaException("Unable to drop underlying data table " - + idxTblName + " for index " + idxTblName); - } } success = ms.commitTransaction(); } finally { @@ -2739,6 +2909,212 @@ } @Override + public void create_cluster(final Cluster cluster) + throws AlreadyExistsException, InvalidObjectException, + MetaException, TException { + + incrementCounter("create_cluster"); + try { + executeWithRetry(new Command() { + @Override + public Boolean run(RawStore ms) throws Exception { + create_cluster_core(ms, cluster); + return Boolean.TRUE; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void 
create_cluster_core(RawStore ms, Cluster cluster) + throws AlreadyExistsException, MetaException, + InvalidObjectException { + + if (!MetaStoreUtils.validateName(cluster.getName())) { + throw new InvalidObjectException(cluster.getName() + + " is not a valid cluster name"); + } + + boolean success = false; + try { + ms.openTransaction(); + + if (is_cluster_exists(ms, cluster.getName())) { + throw new AlreadyExistsException("Cluster " + + cluster.getName() + " already exists"); + } + + // set create time + long time = System.currentTimeMillis() / 1000; + cluster.setCreateTime((int) time); + ms.addCluster(cluster); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + private boolean is_cluster_exists(RawStore ms, String name) + throws MetaException { + return ms.getCluster(name) != null; + } + + private void drop_cluster_core(RawStore ms, final String clusterName) + throws NoSuchObjectException, InvalidOperationException, MetaException, + IOException { + boolean success = false; + try { + ms.openTransaction(); + if (ms.dropCluster(clusterName)) { + success = ms.commitTransaction(); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + public void drop_cluster(final String clusterName) + throws NoSuchObjectException, InvalidOperationException, MetaException { + + startFunction("drop_cluster", ": " + clusterName); + boolean success = false; + try { + success = executeWithRetry(new Command() { + @Override + public Boolean run(RawStore ms) throws Exception { + drop_cluster_core(ms, clusterName); + return Boolean.TRUE; + } + }); + } catch (NoSuchObjectException e) { + throw e; + } catch (InvalidOperationException e) { + throw e; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } finally { + endFunction("drop_database", success); + } + } + + @Override + public Cluster get_cluster(final String 
clusterName) + throws MetaException, NoSuchObjectException, TException { + + incrementCounter("get_cluster"); + try { + executeWithRetry(new Command() { + @Override + public Cluster run(RawStore ms) throws Exception { + Cluster ret = ms.getCluster(clusterName); + if (ret == null) { + throw new NoSuchObjectException(clusterName + + " cluster not found"); + } + return ret; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return null; + } + + @Override + public List list_clusters() throws MetaException, TException { + incrementCounter("list_clusters"); + try { + executeWithRetry(new Command>() { + @Override + public List run(RawStore ms) throws Exception { + List ret = ms.listClusters(); + return ret; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return null; + } + + @Override + public void alter_cluster(final String clusterName, final Cluster cluster) + throws MetaException, InvalidObjectException, MetaException, + NoSuchObjectException, TException { + + incrementCounter("create_cluster"); + try { + executeWithRetry(new Command() { + @Override + public Boolean run(RawStore ms) throws Exception { + alter_cluster_core(ms, clusterName, cluster); + return Boolean.TRUE; + } + }); + } catch (InvalidObjectException e) { + throw e; + } catch (NoSuchObjectException e) { + throw e; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + protected void alter_cluster_core(RawStore ms, + String clusterName, Cluster cluster) throws MetaException, + InvalidObjectException, NoSuchObjectException, + AlreadyExistsException { + + cluster.setName(cluster.getName().toLowerCase()); + if (!MetaStoreUtils.validateName(cluster.getName())) { + throw new InvalidObjectException(cluster.getName() + + " is not a valid cluster name"); + } + + boolean success = false; + + try { + ms.openTransaction(); + if 
(!is_cluster_exists(ms, clusterName)) { + throw new NoSuchObjectException("Cluster " + + cluster.getName() + " does not exist."); + } + + if (!cluster.getName().equals(clusterName)) { + // this is rename operation + if (is_cluster_exists(ms, cluster.getName())) { + throw new AlreadyExistsException("Cluster " + + cluster.getName() + " already exists."); + } + } + + ms.alterCluster(clusterName, cluster); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + + } + + @Override public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List groupNames) throws MetaException, TException { @@ -3477,8 +3853,9 @@ TException { Collections.addAll(groupNames, username); return groupNames; + } + } - } /** Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1235046) +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy) @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.Cluster; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; @@ -61,6 +62,7 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ 
-79,6 +81,8 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MCluster; +import org.apache.hadoop.hive.metastore.model.MClusterStorageDescriptor; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; import org.apache.hadoop.hive.metastore.model.MDBPrivilege; import org.apache.hadoop.hive.metastore.model.MDatabase; @@ -3917,4 +3921,169 @@ } return delCnt; } + + @Override + public Boolean addCluster(Cluster cluster) + throws InvalidObjectException, MetaException { + boolean success = false; + boolean commited = false; + try { + openTransaction(); + MCluster nameCheck = this.getMCluster(cluster.getName()); + if (nameCheck != null) { + throw new InvalidObjectException("Cluster " + + cluster.getName() + " already exists."); + } + MCluster mCluster = convertToMCluster(cluster); + pm.makePersistent(mCluster); + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + private MCluster convertToMCluster(Cluster cluster) { + int now = (int)(System.currentTimeMillis()/1000); + MCluster ret = + new MCluster(cluster.getName(), cluster.getComment(), cluster + .getLocationUri(), now, cluster.getParameters()); + return ret; + } + + private MCluster getMCluster(String clusterName) { + MCluster mcluster = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MCluster.class, "clusterName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(true); + mcluster = (MCluster) query.execute(clusterName); + pm.retrieve(mcluster); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mcluster; + } + + @Override + public boolean dropCluster(String clusterName) + throws MetaException, 
InvalidOperationException, NoSuchObjectException { + boolean success = false; + try { + openTransaction(); + MCluster mCluster = getMCluster(clusterName); + if (mCluster == null) { + throw new NoSuchObjectException(clusterName + " cluster does not exist."); + } + boolean inUse = clusterIsLive(mCluster); + if(inUse) { + throw new InvalidOperationException("cluster " + + clusterName + " is still in use."); + } + pm.deletePersistent(mCluster); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public Cluster getCluster(String clusterName) throws MetaException { + MCluster mCluster = getMCluster(clusterName); + if (mCluster == null) { + return null; + } + Cluster ret = + new Cluster(mCluster.getClusterName(), mCluster + .getDescription(), mCluster.getCreateTime(), mCluster + .getLocation(), mCluster.getParameters()); + return ret; + } + + private boolean clusterIsLive(MCluster mcluster) { + MClusterStorageDescriptor mClusterSD = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MClusterStorageDescriptor.class, "cluster.clusterName == t1"); + query.declareParameters("java.lang.String t1"); + //only care about the first element + query.setUnique(true); + mClusterSD = (MClusterStorageDescriptor) query.execute(mcluster.getClusterName()); + pm.retrieve(mClusterSD); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mClusterSD == null; + } + + @Override + public List listClusters() throws MetaException { + + Collection mclusters = null; + List clusters = new ArrayList(); + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MCluster.class); + query.setUnique(false); + mclusters = (Collection) query.execute(); + pm.retrieveAll(mclusters); + for(MCluster mcluster: mclusters) { + clusters.add(convertToCluster(mcluster)); + } + commited = commitTransaction(); + } 
finally { + if (!commited) { + rollbackTransaction(); + } + } + return clusters; + } + + private Cluster convertToCluster(MCluster mcluster) { + return new Cluster(mcluster.getClusterName(), mcluster.getDescription(), + mcluster.getCreateTime(), mcluster.getLocation(), + mcluster.getParameters()); + } + + @Override + public void alterCluster(String clusterName, Cluster cluster) + throws MetaException { + boolean success = false; + try { + openTransaction(); + clusterName = clusterName.toLowerCase(); + + MCluster oldCluster = getMCluster(clusterName); + if (oldCluster == null) { + throw new MetaException("cluster " + clusterName + " doesn't exist"); + } + + oldCluster.setClusterName(cluster.getName().toLowerCase()); + oldCluster.setDescription(cluster.getComment()); + oldCluster.setLocation(cluster.getLocationUri()); + oldCluster.setParameters(cluster.getParameters()); + // commit the changes + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + } } Index: metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py =================================================================== --- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (revision 1235046) +++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py (working copy) @@ -945,6 +945,243 @@ def __ne__(self, other): return not (self == other) +class Cluster: + """ + Attributes: + - name + - comment + - createTime + - locationUri + - parameters + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'comment', None, None, ), # 2 + (3, TType.I32, 'createTime', None, None, ), # 3 + (4, TType.STRING, 'locationUri', None, None, ), # 4 + (5, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 5 + ) + + def __init__(self, name=None, comment=None, createTime=None, locationUri=None, parameters=None,): + self.name = name + self.comment = comment + self.createTime = createTime + self.locationUri = 
locationUri + self.parameters = parameters + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.comment = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.createTime = iprot.readI32(); + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.locationUri = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.parameters = {} + (_ktype70, _vtype71, _size69 ) = iprot.readMapBegin() + for _i73 in xrange(_size69): + _key74 = iprot.readString(); + _val75 = iprot.readString(); + self.parameters[_key74] = _val75 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('Cluster') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.comment is not None: + oprot.writeFieldBegin('comment', TType.STRING, 2) + oprot.writeString(self.comment) + oprot.writeFieldEnd() + if self.createTime is not None: + oprot.writeFieldBegin('createTime', TType.I32, 3) + oprot.writeI32(self.createTime) + oprot.writeFieldEnd() + if 
self.locationUri is not None: + oprot.writeFieldBegin('locationUri', TType.STRING, 4) + oprot.writeString(self.locationUri) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin('parameters', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter76,viter77 in self.parameters.items(): + oprot.writeString(kiter76) + oprot.writeString(viter77) + oprot.writeMapEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ClusterStorageDescriptor: + """ + Attributes: + - cluster + - location + - primary + - dataSynced + - parameters + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'cluster', (Cluster, Cluster.thrift_spec), None, ), # 1 + (2, TType.STRING, 'location', None, None, ), # 2 + (3, TType.BOOL, 'primary', None, None, ), # 3 + (4, TType.BOOL, 'dataSynced', None, None, ), # 4 + (5, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 5 + ) + + def __init__(self, cluster=None, location=None, primary=None, dataSynced=None, parameters=None,): + self.cluster = cluster + self.location = location + self.primary = primary + self.dataSynced = dataSynced + self.parameters = parameters + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == 
TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cluster = Cluster() + self.cluster.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.location = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.primary = iprot.readBool(); + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.dataSynced = iprot.readBool(); + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.MAP: + self.parameters = {} + (_ktype79, _vtype80, _size78 ) = iprot.readMapBegin() + for _i82 in xrange(_size78): + _key83 = iprot.readString(); + _val84 = iprot.readString(); + self.parameters[_key83] = _val84 + iprot.readMapEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ClusterStorageDescriptor') + if self.cluster is not None: + oprot.writeFieldBegin('cluster', TType.STRUCT, 1) + self.cluster.write(oprot) + oprot.writeFieldEnd() + if self.location is not None: + oprot.writeFieldBegin('location', TType.STRING, 2) + oprot.writeString(self.location) + oprot.writeFieldEnd() + if self.primary is not None: + oprot.writeFieldBegin('primary', TType.BOOL, 3) + oprot.writeBool(self.primary) + oprot.writeFieldEnd() + if self.dataSynced is not None: + oprot.writeFieldBegin('dataSynced', TType.BOOL, 4) + oprot.writeBool(self.dataSynced) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin('parameters', TType.MAP, 5) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter85,viter86 in self.parameters.items(): + oprot.writeString(kiter85) + oprot.writeString(viter86) + oprot.writeMapEnd() 
+ oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Database: """ Attributes: @@ -998,11 +1235,11 @@ elif fid == 4: if ftype == TType.MAP: self.parameters = {} - (_ktype70, _vtype71, _size69 ) = iprot.readMapBegin() - for _i73 in xrange(_size69): - _key74 = iprot.readString(); - _val75 = iprot.readString(); - self.parameters[_key74] = _val75 + (_ktype88, _vtype89, _size87 ) = iprot.readMapBegin() + for _i91 in xrange(_size87): + _key92 = iprot.readString(); + _val93 = iprot.readString(); + self.parameters[_key92] = _val93 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1037,9 +1274,9 @@ if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter76,viter77 in self.parameters.items(): - oprot.writeString(kiter76) - oprot.writeString(viter77) + for kiter94,viter95 in self.parameters.items(): + oprot.writeString(kiter94) + oprot.writeString(viter95) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -1106,11 +1343,11 @@ elif fid == 3: if ftype == TType.MAP: self.parameters = {} - (_ktype79, _vtype80, _size78 ) = iprot.readMapBegin() - for _i82 in xrange(_size78): - _key83 = iprot.readString(); - _val84 = iprot.readString(); - self.parameters[_key83] = _val84 + (_ktype97, _vtype98, _size96 ) = iprot.readMapBegin() + for _i100 in xrange(_size96): + _key101 = iprot.readString(); + _val102 = iprot.readString(); + self.parameters[_key101] = _val102 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1135,9 +1372,9 @@ if self.parameters is not None: 
oprot.writeFieldBegin('parameters', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter85,viter86 in self.parameters.items(): - oprot.writeString(kiter85) - oprot.writeString(viter86) + for kiter103,viter104 in self.parameters.items(): + oprot.writeString(kiter103) + oprot.writeString(viter104) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1243,6 +1480,7 @@ - bucketCols - sortCols - parameters + - clusterStorage """ thrift_spec = ( @@ -1257,9 +1495,10 @@ (8, TType.LIST, 'bucketCols', (TType.STRING,None), None, ), # 8 (9, TType.LIST, 'sortCols', (TType.STRUCT,(Order, Order.thrift_spec)), None, ), # 9 (10, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 10 + (11, TType.LIST, 'clusterStorage', (TType.STRUCT,(ClusterStorageDescriptor, ClusterStorageDescriptor.thrift_spec)), None, ), # 11 ) - def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None,): + def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None, clusterStorage=None,): self.cols = cols self.location = location self.inputFormat = inputFormat @@ -1270,6 +1509,7 @@ self.bucketCols = bucketCols self.sortCols = sortCols self.parameters = parameters + self.clusterStorage = clusterStorage def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1283,11 +1523,11 @@ if fid == 1: if ftype == TType.LIST: self.cols = [] - (_etype90, _size87) = iprot.readListBegin() - for _i91 in xrange(_size87): - _elem92 = FieldSchema() - _elem92.read(iprot) - self.cols.append(_elem92) + (_etype108, _size105) = iprot.readListBegin() + for _i109 in 
xrange(_size105): + _elem110 = FieldSchema() + _elem110.read(iprot) + self.cols.append(_elem110) iprot.readListEnd() else: iprot.skip(ftype) @@ -1325,35 +1565,46 @@ elif fid == 8: if ftype == TType.LIST: self.bucketCols = [] - (_etype96, _size93) = iprot.readListBegin() - for _i97 in xrange(_size93): - _elem98 = iprot.readString(); - self.bucketCols.append(_elem98) + (_etype114, _size111) = iprot.readListBegin() + for _i115 in xrange(_size111): + _elem116 = iprot.readString(); + self.bucketCols.append(_elem116) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.LIST: self.sortCols = [] - (_etype102, _size99) = iprot.readListBegin() - for _i103 in xrange(_size99): - _elem104 = Order() - _elem104.read(iprot) - self.sortCols.append(_elem104) + (_etype120, _size117) = iprot.readListBegin() + for _i121 in xrange(_size117): + _elem122 = Order() + _elem122.read(iprot) + self.sortCols.append(_elem122) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 10: if ftype == TType.MAP: self.parameters = {} - (_ktype106, _vtype107, _size105 ) = iprot.readMapBegin() - for _i109 in xrange(_size105): - _key110 = iprot.readString(); - _val111 = iprot.readString(); - self.parameters[_key110] = _val111 + (_ktype124, _vtype125, _size123 ) = iprot.readMapBegin() + for _i127 in xrange(_size123): + _key128 = iprot.readString(); + _val129 = iprot.readString(); + self.parameters[_key128] = _val129 iprot.readMapEnd() else: iprot.skip(ftype) + elif fid == 11: + if ftype == TType.LIST: + self.clusterStorage = [] + (_etype133, _size130) = iprot.readListBegin() + for _i134 in xrange(_size130): + _elem135 = ClusterStorageDescriptor() + _elem135.read(iprot) + self.clusterStorage.append(_elem135) + iprot.readListEnd() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1367,8 +1618,8 @@ if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter112 in self.cols: - 
iter112.write(oprot) + for iter136 in self.cols: + iter136.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.location is not None: @@ -1398,25 +1649,32 @@ if self.bucketCols is not None: oprot.writeFieldBegin('bucketCols', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.bucketCols)) - for iter113 in self.bucketCols: - oprot.writeString(iter113) + for iter137 in self.bucketCols: + oprot.writeString(iter137) oprot.writeListEnd() oprot.writeFieldEnd() if self.sortCols is not None: oprot.writeFieldBegin('sortCols', TType.LIST, 9) oprot.writeListBegin(TType.STRUCT, len(self.sortCols)) - for iter114 in self.sortCols: - iter114.write(oprot) + for iter138 in self.sortCols: + iter138.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter115,viter116 in self.parameters.items(): - oprot.writeString(kiter115) - oprot.writeString(viter116) + for kiter139,viter140 in self.parameters.items(): + oprot.writeString(kiter139) + oprot.writeString(viter140) oprot.writeMapEnd() oprot.writeFieldEnd() + if self.clusterStorage is not None: + oprot.writeFieldBegin('clusterStorage', TType.LIST, 11) + oprot.writeListBegin(TType.STRUCT, len(self.clusterStorage)) + for iter141 in self.clusterStorage: + iter141.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1533,22 +1791,22 @@ elif fid == 8: if ftype == TType.LIST: self.partitionKeys = [] - (_etype120, _size117) = iprot.readListBegin() - for _i121 in xrange(_size117): - _elem122 = FieldSchema() - _elem122.read(iprot) - self.partitionKeys.append(_elem122) + (_etype145, _size142) = iprot.readListBegin() + for _i146 in xrange(_size142): + _elem147 = FieldSchema() + _elem147.read(iprot) + self.partitionKeys.append(_elem147) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == 
TType.MAP: self.parameters = {} - (_ktype124, _vtype125, _size123 ) = iprot.readMapBegin() - for _i127 in xrange(_size123): - _key128 = iprot.readString(); - _val129 = iprot.readString(); - self.parameters[_key128] = _val129 + (_ktype149, _vtype150, _size148 ) = iprot.readMapBegin() + for _i152 in xrange(_size148): + _key153 = iprot.readString(); + _val154 = iprot.readString(); + self.parameters[_key153] = _val154 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1614,16 +1872,16 @@ if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter130 in self.partitionKeys: - iter130.write(oprot) + for iter155 in self.partitionKeys: + iter155.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter131,viter132 in self.parameters.items(): - oprot.writeString(kiter131) - oprot.writeString(viter132) + for kiter156,viter157 in self.parameters.items(): + oprot.writeString(kiter156) + oprot.writeString(viter157) oprot.writeMapEnd() oprot.writeFieldEnd() if self.viewOriginalText is not None: @@ -1707,10 +1965,10 @@ if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype136, _size133) = iprot.readListBegin() - for _i137 in xrange(_size133): - _elem138 = iprot.readString(); - self.values.append(_elem138) + (_etype161, _size158) = iprot.readListBegin() + for _i162 in xrange(_size158): + _elem163 = iprot.readString(); + self.values.append(_elem163) iprot.readListEnd() else: iprot.skip(ftype) @@ -1743,11 +2001,11 @@ elif fid == 7: if ftype == TType.MAP: self.parameters = {} - (_ktype140, _vtype141, _size139 ) = iprot.readMapBegin() - for _i143 in xrange(_size139): - _key144 = iprot.readString(); - _val145 = iprot.readString(); - self.parameters[_key144] = _val145 + (_ktype165, _vtype166, _size164 ) = iprot.readMapBegin() 
+ for _i168 in xrange(_size164): + _key169 = iprot.readString(); + _val170 = iprot.readString(); + self.parameters[_key169] = _val170 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1770,8 +2028,8 @@ if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter146 in self.values: - oprot.writeString(iter146) + for iter171 in self.values: + oprot.writeString(iter171) oprot.writeListEnd() oprot.writeFieldEnd() if self.dbName is not None: @@ -1797,9 +2055,9 @@ if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 7) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter147,viter148 in self.parameters.items(): - oprot.writeString(kiter147) - oprot.writeString(viter148) + for kiter172,viter173 in self.parameters.items(): + oprot.writeString(kiter172) + oprot.writeString(viter173) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -1918,11 +2176,11 @@ elif fid == 9: if ftype == TType.MAP: self.parameters = {} - (_ktype150, _vtype151, _size149 ) = iprot.readMapBegin() - for _i153 in xrange(_size149): - _key154 = iprot.readString(); - _val155 = iprot.readString(); - self.parameters[_key154] = _val155 + (_ktype175, _vtype176, _size174 ) = iprot.readMapBegin() + for _i178 in xrange(_size174): + _key179 = iprot.readString(); + _val180 = iprot.readString(); + self.parameters[_key179] = _val180 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1976,9 +2234,9 @@ if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter156,viter157 in self.parameters.items(): - oprot.writeString(kiter156) - oprot.writeString(viter157) + for kiter181,viter182 in self.parameters.items(): + oprot.writeString(kiter181) + oprot.writeString(viter182) oprot.writeMapEnd() oprot.writeFieldEnd() if self.deferredRebuild is not None: @@ -2032,22 +2290,22 
@@ if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype161, _size158) = iprot.readListBegin() - for _i162 in xrange(_size158): - _elem163 = FieldSchema() - _elem163.read(iprot) - self.fieldSchemas.append(_elem163) + (_etype186, _size183) = iprot.readListBegin() + for _i187 in xrange(_size183): + _elem188 = FieldSchema() + _elem188.read(iprot) + self.fieldSchemas.append(_elem188) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - (_ktype165, _vtype166, _size164 ) = iprot.readMapBegin() - for _i168 in xrange(_size164): - _key169 = iprot.readString(); - _val170 = iprot.readString(); - self.properties[_key169] = _val170 + (_ktype190, _vtype191, _size189 ) = iprot.readMapBegin() + for _i193 in xrange(_size189): + _key194 = iprot.readString(); + _val195 = iprot.readString(); + self.properties[_key194] = _val195 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2064,16 +2322,16 @@ if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter171 in self.fieldSchemas: - iter171.write(oprot) + for iter196 in self.fieldSchemas: + iter196.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter172,viter173 in self.properties.items(): - oprot.writeString(kiter172) - oprot.writeString(viter173) + for kiter197,viter198 in self.properties.items(): + oprot.writeString(kiter197) + oprot.writeString(viter198) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py =================================================================== --- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (revision 1235046) +++ 
metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (working copy) @@ -121,6 +121,16 @@ """ pass + def drop_table_on_cluster(self, cluster_name, dbname, name, deleteData): + """ + Parameters: + - cluster_name + - dbname + - name + - deleteData + """ + pass + def get_tables(self, db_name, pattern): """ Parameters: @@ -212,6 +222,17 @@ """ pass + def drop_partition_on_cluster(self, cluster_name, db_name, tbl_name, part_vals, deleteData): + """ + Parameters: + - cluster_name + - db_name + - tbl_name + - part_vals + - deleteData + """ + pass + def drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData): """ Parameters: @@ -447,6 +468,38 @@ """ pass + def create_cluster(self, cluster): + """ + Parameters: + - cluster + """ + pass + + def drop_cluster(self, cluster_name): + """ + Parameters: + - cluster_name + """ + pass + + def get_cluster(self, cluster_name): + """ + Parameters: + - cluster_name + """ + pass + + def list_clusters(self, ): + pass + + def alter_cluster(self, cluster_name, cluster): + """ + Parameters: + - cluster_name + - cluster + """ + pass + def create_role(self, role): """ Parameters: @@ -1046,6 +1099,46 @@ raise result.o3 return + def drop_table_on_cluster(self, cluster_name, dbname, name, deleteData): + """ + Parameters: + - cluster_name + - dbname + - name + - deleteData + """ + self.send_drop_table_on_cluster(cluster_name, dbname, name, deleteData) + self.recv_drop_table_on_cluster() + + def send_drop_table_on_cluster(self, cluster_name, dbname, name, deleteData): + self._oprot.writeMessageBegin('drop_table_on_cluster', TMessageType.CALL, self._seqid) + args = drop_table_on_cluster_args() + args.cluster_name = cluster_name + args.dbname = dbname + args.name = name + args.deleteData = deleteData + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_table_on_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == 
TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = drop_table_on_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + def get_tables(self, db_name, pattern): """ Parameters: @@ -1454,6 +1547,50 @@ raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_partition failed: unknown result"); + def drop_partition_on_cluster(self, cluster_name, db_name, tbl_name, part_vals, deleteData): + """ + Parameters: + - cluster_name + - db_name + - tbl_name + - part_vals + - deleteData + """ + self.send_drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData) + return self.recv_drop_partition_on_cluster() + + def send_drop_partition_on_cluster(self, cluster_name, db_name, tbl_name, part_vals, deleteData): + self._oprot.writeMessageBegin('drop_partition_on_cluster', TMessageType.CALL, self._seqid) + args = drop_partition_on_cluster_args() + args.cluster_name = cluster_name + args.db_name = db_name + args.tbl_name = tbl_name + args.part_vals = part_vals + args.deleteData = deleteData + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_partition_on_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = drop_partition_on_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + raise TApplicationException(TApplicationException.MISSING_RESULT, 
"drop_partition_on_cluster failed: unknown result"); + def drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData): """ Parameters: @@ -2424,6 +2561,173 @@ raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_index_names failed: unknown result"); + def create_cluster(self, cluster): + """ + Parameters: + - cluster + """ + self.send_create_cluster(cluster) + self.recv_create_cluster() + + def send_create_cluster(self, cluster): + self._oprot.writeMessageBegin('create_cluster', TMessageType.CALL, self._seqid) + args = create_cluster_args() + args.cluster = cluster + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = create_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def drop_cluster(self, cluster_name): + """ + Parameters: + - cluster_name + """ + self.send_drop_cluster(cluster_name) + self.recv_drop_cluster() + + def send_drop_cluster(self, cluster_name): + self._oprot.writeMessageBegin('drop_cluster', TMessageType.CALL, self._seqid) + args = drop_cluster_args() + args.cluster_name = cluster_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = drop_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise 
result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_cluster(self, cluster_name): + """ + Parameters: + - cluster_name + """ + self.send_get_cluster(cluster_name) + return self.recv_get_cluster() + + def send_get_cluster(self, cluster_name): + self._oprot.writeMessageBegin('get_cluster', TMessageType.CALL, self._seqid) + args = get_cluster_args() + args.cluster_name = cluster_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_cluster failed: unknown result"); + + def list_clusters(self, ): + self.send_list_clusters() + return self.recv_list_clusters() + + def send_list_clusters(self, ): + self._oprot.writeMessageBegin('list_clusters', TMessageType.CALL, self._seqid) + args = list_clusters_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_list_clusters(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = list_clusters_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "list_clusters failed: unknown result"); + + def alter_cluster(self, cluster_name, cluster): + """ + Parameters: + - 
cluster_name + - cluster + """ + self.send_alter_cluster(cluster_name, cluster) + self.recv_alter_cluster() + + def send_alter_cluster(self, cluster_name, cluster): + self._oprot.writeMessageBegin('alter_cluster', TMessageType.CALL, self._seqid) + args = alter_cluster_args() + args.cluster_name = cluster_name + args.cluster = cluster + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_cluster(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = alter_cluster_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + if result.o4 is not None: + raise result.o4 + return + def create_role(self, role): """ Parameters: @@ -2911,6 +3215,7 @@ self._processMap["get_schema"] = Processor.process_get_schema self._processMap["create_table"] = Processor.process_create_table self._processMap["drop_table"] = Processor.process_drop_table + self._processMap["drop_table_on_cluster"] = Processor.process_drop_table_on_cluster self._processMap["get_tables"] = Processor.process_get_tables self._processMap["get_all_tables"] = Processor.process_get_all_tables self._processMap["get_table"] = Processor.process_get_table @@ -2922,6 +3227,7 @@ self._processMap["append_partition"] = Processor.process_append_partition self._processMap["append_partition_by_name"] = Processor.process_append_partition_by_name self._processMap["drop_partition"] = Processor.process_drop_partition + self._processMap["drop_partition_on_cluster"] = Processor.process_drop_partition_on_cluster self._processMap["drop_partition_by_name"] = Processor.process_drop_partition_by_name self._processMap["get_partition"] = Processor.process_get_partition 
self._processMap["get_partition_with_auth"] = Processor.process_get_partition_with_auth @@ -2947,6 +3253,11 @@ self._processMap["get_index_by_name"] = Processor.process_get_index_by_name self._processMap["get_indexes"] = Processor.process_get_indexes self._processMap["get_index_names"] = Processor.process_get_index_names + self._processMap["create_cluster"] = Processor.process_create_cluster + self._processMap["drop_cluster"] = Processor.process_drop_cluster + self._processMap["get_cluster"] = Processor.process_get_cluster + self._processMap["list_clusters"] = Processor.process_list_clusters + self._processMap["alter_cluster"] = Processor.process_alter_cluster self._processMap["create_role"] = Processor.process_create_role self._processMap["drop_role"] = Processor.process_drop_role self._processMap["get_role_names"] = Processor.process_get_role_names @@ -3209,6 +3520,24 @@ oprot.writeMessageEnd() oprot.trans.flush() + def process_drop_table_on_cluster(self, seqid, iprot, oprot): + args = drop_table_on_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_table_on_cluster_result() + try: + self._handler.drop_table_on_cluster(args.cluster_name, args.dbname, args.name, args.deleteData) + except NoSuchObjectException, o1: + result.o1 = o1 + except MetaException, o2: + result.o2 = o2 + except InvalidOperationException, o3: + result.o3 = o3 + oprot.writeMessageBegin("drop_table_on_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_tables(self, seqid, iprot, oprot): args = get_tables_args() args.read(iprot) @@ -3393,6 +3722,24 @@ oprot.writeMessageEnd() oprot.trans.flush() + def process_drop_partition_on_cluster(self, seqid, iprot, oprot): + args = drop_partition_on_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_partition_on_cluster_result() + try: + result.success = self._handler.drop_partition_on_cluster(args.cluster_name, args.db_name, 
args.tbl_name, args.part_vals, args.deleteData) + except NoSuchObjectException, o1: + result.o1 = o1 + except MetaException, o2: + result.o2 = o2 + except InvalidOperationException, o3: + result.o3 = o3 + oprot.writeMessageBegin("drop_partition_on_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_drop_partition_by_name(self, seqid, iprot, oprot): args = drop_partition_by_name_args() args.read(iprot) @@ -3801,6 +4148,92 @@ oprot.writeMessageEnd() oprot.trans.flush() + def process_create_cluster(self, seqid, iprot, oprot): + args = create_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_cluster_result() + try: + self._handler.create_cluster(args.cluster) + except AlreadyExistsException, o1: + result.o1 = o1 + except InvalidObjectException, o2: + result.o2 = o2 + except MetaException, o3: + result.o3 = o3 + oprot.writeMessageBegin("create_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_cluster(self, seqid, iprot, oprot): + args = drop_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_cluster_result() + try: + self._handler.drop_cluster(args.cluster_name) + except NoSuchObjectException, o1: + result.o1 = o1 + except InvalidOperationException, o2: + result.o2 = o2 + except MetaException, o3: + result.o3 = o3 + oprot.writeMessageBegin("drop_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_cluster(self, seqid, iprot, oprot): + args = get_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_cluster_result() + try: + result.success = self._handler.get_cluster(args.cluster_name) + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: + result.o2 = o2 + oprot.writeMessageBegin("get_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + 
oprot.writeMessageEnd() + oprot.trans.flush() + + def process_list_clusters(self, seqid, iprot, oprot): + args = list_clusters_args() + args.read(iprot) + iprot.readMessageEnd() + result = list_clusters_result() + try: + result.success = self._handler.list_clusters() + except MetaException, o1: + result.o1 = o1 + oprot.writeMessageBegin("list_clusters", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_alter_cluster(self, seqid, iprot, oprot): + args = alter_cluster_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_cluster_result() + try: + self._handler.alter_cluster(args.cluster_name, args.cluster) + except MetaException, o1: + result.o1 = o1 + except InvalidObjectException, o2: + result.o2 = o2 + except NoSuchObjectException, o3: + result.o3 = o3 + except AlreadyExistsException, o4: + result.o4 = o4 + oprot.writeMessageBegin("alter_cluster", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_create_role(self, seqid, iprot, oprot): args = create_role_args() args.read(iprot) @@ -4553,10 +4986,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype177, _size174) = iprot.readListBegin() - for _i178 in xrange(_size174): - _elem179 = iprot.readString(); - self.success.append(_elem179) + (_etype202, _size199) = iprot.readListBegin() + for _i203 in xrange(_size199): + _elem204 = iprot.readString(); + self.success.append(_elem204) iprot.readListEnd() else: iprot.skip(ftype) @@ -4579,8 +5012,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter180 in self.success: - oprot.writeString(iter180) + for iter205 in self.success: + oprot.writeString(iter205) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -4675,10 +5108,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype184, _size181) = 
iprot.readListBegin() - for _i185 in xrange(_size181): - _elem186 = iprot.readString(); - self.success.append(_elem186) + (_etype209, _size206) = iprot.readListBegin() + for _i210 in xrange(_size206): + _elem211 = iprot.readString(); + self.success.append(_elem211) iprot.readListEnd() else: iprot.skip(ftype) @@ -4701,8 +5134,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter187 in self.success: - oprot.writeString(iter187) + for iter212 in self.success: + oprot.writeString(iter212) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -5412,12 +5845,12 @@ if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype189, _vtype190, _size188 ) = iprot.readMapBegin() - for _i192 in xrange(_size188): - _key193 = iprot.readString(); - _val194 = Type() - _val194.read(iprot) - self.success[_key193] = _val194 + (_ktype214, _vtype215, _size213 ) = iprot.readMapBegin() + for _i217 in xrange(_size213): + _key218 = iprot.readString(); + _val219 = Type() + _val219.read(iprot) + self.success[_key218] = _val219 iprot.readMapEnd() else: iprot.skip(ftype) @@ -5440,9 +5873,9 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter195,viter196 in self.success.items(): - oprot.writeString(kiter195) - viter196.write(oprot) + for kiter220,viter221 in self.success.items(): + oprot.writeString(kiter220) + viter221.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -5573,11 +6006,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype200, _size197) = iprot.readListBegin() - for _i201 in xrange(_size197): - _elem202 = FieldSchema() - _elem202.read(iprot) - self.success.append(_elem202) + (_etype225, _size222) = iprot.readListBegin() + for _i226 in xrange(_size222): + _elem227 = FieldSchema() + _elem227.read(iprot) + 
self.success.append(_elem227) iprot.readListEnd() else: iprot.skip(ftype) @@ -5612,8 +6045,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter203 in self.success: - iter203.write(oprot) + for iter228 in self.success: + iter228.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -5752,11 +6185,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype207, _size204) = iprot.readListBegin() - for _i208 in xrange(_size204): - _elem209 = FieldSchema() - _elem209.read(iprot) - self.success.append(_elem209) + (_etype232, _size229) = iprot.readListBegin() + for _i233 in xrange(_size229): + _elem234 = FieldSchema() + _elem234.read(iprot) + self.success.append(_elem234) iprot.readListEnd() else: iprot.skip(ftype) @@ -5791,8 +6224,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter210 in self.success: - iter210.write(oprot) + for iter235 in self.success: + iter235.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -6144,6 +6577,189 @@ def __ne__(self, other): return not (self == other) +class drop_table_on_cluster_args: + """ + Attributes: + - cluster_name + - dbname + - name + - deleteData + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'cluster_name', None, None, ), # 1 + (2, TType.STRING, 'dbname', None, None, ), # 2 + (3, TType.STRING, 'name', None, None, ), # 3 + (4, TType.BOOL, 'deleteData', None, None, ), # 4 + ) + + def __init__(self, cluster_name=None, dbname=None, name=None, deleteData=None,): + self.cluster_name = cluster_name + self.dbname = dbname + self.name = name + self.deleteData = deleteData + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary 
is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.cluster_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.dbname = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_table_on_cluster_args') + if self.cluster_name is not None: + oprot.writeFieldBegin('cluster_name', TType.STRING, 1) + oprot.writeString(self.cluster_name) + oprot.writeFieldEnd() + if self.dbname is not None: + oprot.writeFieldBegin('dbname', TType.STRING, 2) + oprot.writeString(self.dbname) + oprot.writeFieldEnd() + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 3) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin('deleteData', TType.BOOL, 4) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + return not (self == other) + +class drop_table_on_cluster_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidOperationException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_table_on_cluster_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is 
not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class get_tables_args: """ Attributes: @@ -6244,10 +6860,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype214, _size211) = iprot.readListBegin() - for _i215 in xrange(_size211): - _elem216 = iprot.readString(); - self.success.append(_elem216) + (_etype239, _size236) = iprot.readListBegin() + for _i240 in xrange(_size236): + _elem241 = iprot.readString(); + self.success.append(_elem241) iprot.readListEnd() else: iprot.skip(ftype) @@ -6270,8 +6886,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter217 in self.success: - oprot.writeString(iter217) + for iter242 in self.success: + oprot.writeString(iter242) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -6384,10 +7000,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype221, _size218) = iprot.readListBegin() - for _i222 in xrange(_size218): - _elem223 = iprot.readString(); - self.success.append(_elem223) + (_etype246, _size243) = iprot.readListBegin() + for _i247 in xrange(_size243): + _elem248 = iprot.readString(); + self.success.append(_elem248) iprot.readListEnd() else: iprot.skip(ftype) @@ -6410,8 +7026,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter224 in self.success: - oprot.writeString(iter224) + for iter249 in self.success: + oprot.writeString(iter249) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -6628,10 +7244,10 @@ elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype228, _size225) = iprot.readListBegin() - for _i229 in xrange(_size225): - _elem230 = iprot.readString(); - self.tbl_names.append(_elem230) + (_etype253, _size250) = iprot.readListBegin() + for _i254 in xrange(_size250): + _elem255 = iprot.readString(); + self.tbl_names.append(_elem255) iprot.readListEnd() else: iprot.skip(ftype) @@ -6652,8 +7268,8 @@ if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter231 in self.tbl_names: - oprot.writeString(iter231) + for iter256 in self.tbl_names: + oprot.writeString(iter256) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6708,11 +7324,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype235, _size232) = iprot.readListBegin() - for _i236 in xrange(_size232): - _elem237 = Table() - _elem237.read(iprot) - self.success.append(_elem237) + (_etype260, _size257) = iprot.readListBegin() + for _i261 in xrange(_size257): + _elem262 = Table() + _elem262.read(iprot) + self.success.append(_elem262) iprot.readListEnd() else: iprot.skip(ftype) @@ -6747,8 +7363,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter238 in self.success: - iter238.write(oprot) + for iter263 in self.success: + iter263.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -6899,10 +7515,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype242, _size239) = iprot.readListBegin() - for _i243 in xrange(_size239): - _elem244 = iprot.readString(); - self.success.append(_elem244) + (_etype267, _size264) = iprot.readListBegin() + for _i268 in xrange(_size264): + _elem269 = iprot.readString(); + self.success.append(_elem269) iprot.readListEnd() 
else: iprot.skip(ftype) @@ -6937,8 +7553,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter245 in self.success: - oprot.writeString(iter245) + for iter270 in self.success: + oprot.writeString(iter270) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -7316,11 +7932,11 @@ if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype249, _size246) = iprot.readListBegin() - for _i250 in xrange(_size246): - _elem251 = Partition() - _elem251.read(iprot) - self.new_parts.append(_elem251) + (_etype274, _size271) = iprot.readListBegin() + for _i275 in xrange(_size271): + _elem276 = Partition() + _elem276.read(iprot) + self.new_parts.append(_elem276) iprot.readListEnd() else: iprot.skip(ftype) @@ -7337,8 +7953,8 @@ if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter252 in self.new_parts: - iter252.write(oprot) + for iter277 in self.new_parts: + iter277.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7499,10 +8115,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype256, _size253) = iprot.readListBegin() - for _i257 in xrange(_size253): - _elem258 = iprot.readString(); - self.part_vals.append(_elem258) + (_etype281, _size278) = iprot.readListBegin() + for _i282 in xrange(_size278): + _elem283 = iprot.readString(); + self.part_vals.append(_elem283) iprot.readListEnd() else: iprot.skip(ftype) @@ -7527,8 +8143,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter259 in self.part_vals: - oprot.writeString(iter259) + for iter284 in self.part_vals: + oprot.writeString(iter284) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7876,10 +8492,10 @@ elif fid == 3: if ftype == TType.LIST: 
self.part_vals = [] - (_etype263, _size260) = iprot.readListBegin() - for _i264 in xrange(_size260): - _elem265 = iprot.readString(); - self.part_vals.append(_elem265) + (_etype288, _size285) = iprot.readListBegin() + for _i289 in xrange(_size285): + _elem290 = iprot.readString(); + self.part_vals.append(_elem290) iprot.readListEnd() else: iprot.skip(ftype) @@ -7909,8 +8525,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter266 in self.part_vals: - oprot.writeString(iter266) + for iter291 in self.part_vals: + oprot.writeString(iter291) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -8020,6 +8636,220 @@ def __ne__(self, other): return not (self == other) +class drop_partition_on_cluster_args: + """ + Attributes: + - cluster_name + - db_name + - tbl_name + - part_vals + - deleteData + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'cluster_name', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 + (4, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 4 + (5, TType.BOOL, 'deleteData', None, None, ), # 5 + ) + + def __init__(self, cluster_name=None, db_name=None, tbl_name=None, part_vals=None, deleteData=None,): + self.cluster_name = cluster_name + self.db_name = db_name + self.tbl_name = tbl_name + self.part_vals = part_vals + self.deleteData = deleteData + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.cluster_name = 
iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.db_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.part_vals = [] + (_etype295, _size292) = iprot.readListBegin() + for _i296 in xrange(_size292): + _elem297 = iprot.readString(); + self.part_vals.append(_elem297) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_partition_on_cluster_args') + if self.cluster_name is not None: + oprot.writeFieldBegin('cluster_name', TType.STRING, 1) + oprot.writeString(self.cluster_name) + oprot.writeFieldEnd() + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 2) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.part_vals is not None: + oprot.writeFieldBegin('part_vals', TType.LIST, 4) + oprot.writeListBegin(TType.STRING, len(self.part_vals)) + for iter298 in self.part_vals: + oprot.writeString(iter298) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin('deleteData', TType.BOOL, 5) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, 
value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_partition_on_cluster_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 3 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool(); + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = InvalidOperationException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not 
None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_partition_on_cluster_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class drop_partition_by_name_args: """ Attributes: @@ -8243,10 +9073,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype270, _size267) = iprot.readListBegin() - for _i271 in xrange(_size267): - _elem272 = iprot.readString(); - self.part_vals.append(_elem272) + (_etype302, _size299) = iprot.readListBegin() + for _i303 in xrange(_size299): + _elem304 = iprot.readString(); + self.part_vals.append(_elem304) iprot.readListEnd() else: iprot.skip(ftype) @@ -8271,8 +9101,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter273 in self.part_vals: - oprot.writeString(iter273) + for iter305 in self.part_vals: + oprot.writeString(iter305) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8427,10 +9257,10 @@ elif fid == 3: if ftype == TType.LIST: 
self.part_vals = [] - (_etype277, _size274) = iprot.readListBegin() - for _i278 in xrange(_size274): - _elem279 = iprot.readString(); - self.part_vals.append(_elem279) + (_etype309, _size306) = iprot.readListBegin() + for _i310 in xrange(_size306): + _elem311 = iprot.readString(); + self.part_vals.append(_elem311) iprot.readListEnd() else: iprot.skip(ftype) @@ -8442,10 +9272,10 @@ elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype283, _size280) = iprot.readListBegin() - for _i284 in xrange(_size280): - _elem285 = iprot.readString(); - self.group_names.append(_elem285) + (_etype315, _size312) = iprot.readListBegin() + for _i316 in xrange(_size312): + _elem317 = iprot.readString(); + self.group_names.append(_elem317) iprot.readListEnd() else: iprot.skip(ftype) @@ -8470,8 +9300,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter286 in self.part_vals: - oprot.writeString(iter286) + for iter318 in self.part_vals: + oprot.writeString(iter318) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -8481,8 +9311,8 @@ if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter287 in self.group_names: - oprot.writeString(iter287) + for iter319 in self.group_names: + oprot.writeString(iter319) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8874,11 +9704,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype291, _size288) = iprot.readListBegin() - for _i292 in xrange(_size288): - _elem293 = Partition() - _elem293.read(iprot) - self.success.append(_elem293) + (_etype323, _size320) = iprot.readListBegin() + for _i324 in xrange(_size320): + _elem325 = Partition() + _elem325.read(iprot) + self.success.append(_elem325) iprot.readListEnd() else: iprot.skip(ftype) @@ -8907,8 +9737,8 @@ if self.success is not 
None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter294 in self.success: - iter294.write(oprot) + for iter326 in self.success: + iter326.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -8995,10 +9825,10 @@ elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype298, _size295) = iprot.readListBegin() - for _i299 in xrange(_size295): - _elem300 = iprot.readString(); - self.group_names.append(_elem300) + (_etype330, _size327) = iprot.readListBegin() + for _i331 in xrange(_size327): + _elem332 = iprot.readString(); + self.group_names.append(_elem332) iprot.readListEnd() else: iprot.skip(ftype) @@ -9031,8 +9861,8 @@ if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter301 in self.group_names: - oprot.writeString(iter301) + for iter333 in self.group_names: + oprot.writeString(iter333) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9084,11 +9914,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype305, _size302) = iprot.readListBegin() - for _i306 in xrange(_size302): - _elem307 = Partition() - _elem307.read(iprot) - self.success.append(_elem307) + (_etype337, _size334) = iprot.readListBegin() + for _i338 in xrange(_size334): + _elem339 = Partition() + _elem339.read(iprot) + self.success.append(_elem339) iprot.readListEnd() else: iprot.skip(ftype) @@ -9117,8 +9947,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter308 in self.success: - iter308.write(oprot) + for iter340 in self.success: + iter340.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9259,10 +10089,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype312, _size309) = iprot.readListBegin() - for _i313 in 
xrange(_size309): - _elem314 = iprot.readString(); - self.success.append(_elem314) + (_etype344, _size341) = iprot.readListBegin() + for _i345 in xrange(_size341): + _elem346 = iprot.readString(); + self.success.append(_elem346) iprot.readListEnd() else: iprot.skip(ftype) @@ -9285,8 +10115,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter315 in self.success: - oprot.writeString(iter315) + for iter347 in self.success: + oprot.writeString(iter347) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -9356,10 +10186,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype319, _size316) = iprot.readListBegin() - for _i320 in xrange(_size316): - _elem321 = iprot.readString(); - self.part_vals.append(_elem321) + (_etype351, _size348) = iprot.readListBegin() + for _i352 in xrange(_size348): + _elem353 = iprot.readString(); + self.part_vals.append(_elem353) iprot.readListEnd() else: iprot.skip(ftype) @@ -9389,8 +10219,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter322 in self.part_vals: - oprot.writeString(iter322) + for iter354 in self.part_vals: + oprot.writeString(iter354) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -9446,11 +10276,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype326, _size323) = iprot.readListBegin() - for _i327 in xrange(_size323): - _elem328 = Partition() - _elem328.read(iprot) - self.success.append(_elem328) + (_etype358, _size355) = iprot.readListBegin() + for _i359 in xrange(_size355): + _elem360 = Partition() + _elem360.read(iprot) + self.success.append(_elem360) iprot.readListEnd() else: iprot.skip(ftype) @@ -9479,8 +10309,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - 
for iter329 in self.success: - iter329.write(oprot) + for iter361 in self.success: + iter361.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9560,10 +10390,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype333, _size330) = iprot.readListBegin() - for _i334 in xrange(_size330): - _elem335 = iprot.readString(); - self.part_vals.append(_elem335) + (_etype365, _size362) = iprot.readListBegin() + for _i366 in xrange(_size362): + _elem367 = iprot.readString(); + self.part_vals.append(_elem367) iprot.readListEnd() else: iprot.skip(ftype) @@ -9580,10 +10410,10 @@ elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype339, _size336) = iprot.readListBegin() - for _i340 in xrange(_size336): - _elem341 = iprot.readString(); - self.group_names.append(_elem341) + (_etype371, _size368) = iprot.readListBegin() + for _i372 in xrange(_size368): + _elem373 = iprot.readString(); + self.group_names.append(_elem373) iprot.readListEnd() else: iprot.skip(ftype) @@ -9608,8 +10438,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter342 in self.part_vals: - oprot.writeString(iter342) + for iter374 in self.part_vals: + oprot.writeString(iter374) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -9623,8 +10453,8 @@ if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter343 in self.group_names: - oprot.writeString(iter343) + for iter375 in self.group_names: + oprot.writeString(iter375) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9676,11 +10506,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype347, _size344) = iprot.readListBegin() - for _i348 in xrange(_size344): - _elem349 = Partition() - _elem349.read(iprot) - self.success.append(_elem349) + (_etype379, 
_size376) = iprot.readListBegin() + for _i380 in xrange(_size376): + _elem381 = Partition() + _elem381.read(iprot) + self.success.append(_elem381) iprot.readListEnd() else: iprot.skip(ftype) @@ -9709,8 +10539,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter350 in self.success: - iter350.write(oprot) + for iter382 in self.success: + iter382.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9784,10 +10614,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype354, _size351) = iprot.readListBegin() - for _i355 in xrange(_size351): - _elem356 = iprot.readString(); - self.part_vals.append(_elem356) + (_etype386, _size383) = iprot.readListBegin() + for _i387 in xrange(_size383): + _elem388 = iprot.readString(); + self.part_vals.append(_elem388) iprot.readListEnd() else: iprot.skip(ftype) @@ -9817,8 +10647,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter357 in self.part_vals: - oprot.writeString(iter357) + for iter389 in self.part_vals: + oprot.writeString(iter389) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -9874,10 +10704,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype361, _size358) = iprot.readListBegin() - for _i362 in xrange(_size358): - _elem363 = iprot.readString(); - self.success.append(_elem363) + (_etype393, _size390) = iprot.readListBegin() + for _i394 in xrange(_size390): + _elem395 = iprot.readString(); + self.success.append(_elem395) iprot.readListEnd() else: iprot.skip(ftype) @@ -9906,8 +10736,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter364 in self.success: - oprot.writeString(iter364) + for iter396 in self.success: + oprot.writeString(iter396) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10063,11 +10893,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype368, _size365) = iprot.readListBegin() - for _i369 in xrange(_size365): - _elem370 = Partition() - _elem370.read(iprot) - self.success.append(_elem370) + (_etype400, _size397) = iprot.readListBegin() + for _i401 in xrange(_size397): + _elem402 = Partition() + _elem402.read(iprot) + self.success.append(_elem402) iprot.readListEnd() else: iprot.skip(ftype) @@ -10096,8 +10926,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter371 in self.success: - iter371.write(oprot) + for iter403 in self.success: + iter403.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10168,10 +10998,10 @@ elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype375, _size372) = iprot.readListBegin() - for _i376 in xrange(_size372): - _elem377 = iprot.readString(); - self.names.append(_elem377) + (_etype407, _size404) = iprot.readListBegin() + for _i408 in xrange(_size404): + _elem409 = iprot.readString(); + self.names.append(_elem409) iprot.readListEnd() else: iprot.skip(ftype) @@ -10196,8 +11026,8 @@ if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter378 in self.names: - oprot.writeString(iter378) + for iter410 in self.names: + oprot.writeString(iter410) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10249,11 +11079,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype382, _size379) = iprot.readListBegin() - for _i383 in xrange(_size379): - _elem384 = Partition() - _elem384.read(iprot) - self.success.append(_elem384) + (_etype414, _size411) = iprot.readListBegin() + for _i415 in xrange(_size411): + _elem416 = Partition() + _elem416.read(iprot) + self.success.append(_elem416) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -10282,8 +11112,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter385 in self.success: - iter385.write(oprot) + for iter417 in self.success: + iter417.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10516,10 +11346,10 @@ elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype389, _size386) = iprot.readListBegin() - for _i390 in xrange(_size386): - _elem391 = iprot.readString(); - self.part_vals.append(_elem391) + (_etype421, _size418) = iprot.readListBegin() + for _i422 in xrange(_size418): + _elem423 = iprot.readString(); + self.part_vals.append(_elem423) iprot.readListEnd() else: iprot.skip(ftype) @@ -10550,8 +11380,8 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter392 in self.part_vals: - oprot.writeString(iter392) + for iter424 in self.part_vals: + oprot.writeString(iter424) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -10882,10 +11712,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype396, _size393) = iprot.readListBegin() - for _i397 in xrange(_size393): - _elem398 = iprot.readString(); - self.success.append(_elem398) + (_etype428, _size425) = iprot.readListBegin() + for _i429 in xrange(_size425): + _elem430 = iprot.readString(); + self.success.append(_elem430) iprot.readListEnd() else: iprot.skip(ftype) @@ -10908,8 +11738,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter399 in self.success: - oprot.writeString(iter399) + for iter431 in self.success: + oprot.writeString(iter431) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11022,11 +11852,11 @@ if fid == 0: if ftype == TType.MAP: self.success = {} - 
(_ktype401, _vtype402, _size400 ) = iprot.readMapBegin() - for _i404 in xrange(_size400): - _key405 = iprot.readString(); - _val406 = iprot.readString(); - self.success[_key405] = _val406 + (_ktype433, _vtype434, _size432 ) = iprot.readMapBegin() + for _i436 in xrange(_size432): + _key437 = iprot.readString(); + _val438 = iprot.readString(); + self.success[_key437] = _val438 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11049,9 +11879,9 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter407,viter408 in self.success.items(): - oprot.writeString(kiter407) - oprot.writeString(viter408) + for kiter439,viter440 in self.success.items(): + oprot.writeString(kiter439) + oprot.writeString(viter440) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11121,11 +11951,11 @@ elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype410, _vtype411, _size409 ) = iprot.readMapBegin() - for _i413 in xrange(_size409): - _key414 = iprot.readString(); - _val415 = iprot.readString(); - self.part_vals[_key414] = _val415 + (_ktype442, _vtype443, _size441 ) = iprot.readMapBegin() + for _i445 in xrange(_size441): + _key446 = iprot.readString(); + _val447 = iprot.readString(); + self.part_vals[_key446] = _val447 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11155,9 +11985,9 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter416,viter417 in self.part_vals.items(): - oprot.writeString(kiter416) - oprot.writeString(viter417) + for kiter448,viter449 in self.part_vals.items(): + oprot.writeString(kiter448) + oprot.writeString(viter449) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -11353,11 +12183,11 @@ elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype419, _vtype420, _size418 ) = iprot.readMapBegin() - for 
_i422 in xrange(_size418): - _key423 = iprot.readString(); - _val424 = iprot.readString(); - self.part_vals[_key423] = _val424 + (_ktype451, _vtype452, _size450 ) = iprot.readMapBegin() + for _i454 in xrange(_size450): + _key455 = iprot.readString(); + _val456 = iprot.readString(); + self.part_vals[_key455] = _val456 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11387,9 +12217,9 @@ if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter425,viter426 in self.part_vals.items(): - oprot.writeString(kiter425) - oprot.writeString(viter426) + for kiter457,viter458 in self.part_vals.items(): + oprot.writeString(kiter457) + oprot.writeString(viter458) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -12361,11 +13191,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype430, _size427) = iprot.readListBegin() - for _i431 in xrange(_size427): - _elem432 = Index() - _elem432.read(iprot) - self.success.append(_elem432) + (_etype462, _size459) = iprot.readListBegin() + for _i463 in xrange(_size459): + _elem464 = Index() + _elem464.read(iprot) + self.success.append(_elem464) iprot.readListEnd() else: iprot.skip(ftype) @@ -12394,8 +13224,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter433 in self.success: - iter433.write(oprot) + for iter465 in self.success: + iter465.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12536,10 +13366,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype437, _size434) = iprot.readListBegin() - for _i438 in xrange(_size434): - _elem439 = iprot.readString(); - self.success.append(_elem439) + (_etype469, _size466) = iprot.readListBegin() + for _i470 in xrange(_size466): + _elem471 = iprot.readString(); + self.success.append(_elem471) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -12562,8 +13392,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter440 in self.success: - oprot.writeString(iter440) + for iter472 in self.success: + oprot.writeString(iter472) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -12588,6 +13418,743 @@ def __ne__(self, other): return not (self == other) +class create_cluster_args: + """ + Attributes: + - cluster + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'cluster', (Cluster, Cluster.thrift_spec), None, ), # 1 + ) + + def __init__(self, cluster=None,): + self.cluster = cluster + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.cluster = Cluster() + self.cluster.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('create_cluster_args') + if self.cluster is not None: + oprot.writeFieldBegin('cluster', TType.STRUCT, 1) + self.cluster.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class create_cluster_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = AlreadyExistsException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('create_cluster_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + 
oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_cluster_args: + """ + Attributes: + - cluster_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'cluster_name', None, None, ), # 1 + ) + + def __init__(self, cluster_name=None,): + self.cluster_name = cluster_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.cluster_name = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_cluster_args') + if self.cluster_name is not None: + oprot.writeFieldBegin('cluster_name', TType.STRING, 1) + oprot.writeString(self.cluster_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def 
validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_cluster_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('drop_cluster_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_cluster_args: + """ + Attributes: + - cluster_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'cluster_name', None, None, ), # 1 + ) + + def __init__(self, cluster_name=None,): + self.cluster_name = cluster_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.cluster_name = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_cluster_args') + if self.cluster_name is not 
None: + oprot.writeFieldBegin('cluster_name', TType.STRING, 1) + oprot.writeString(self.cluster_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_cluster_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (Cluster, Cluster.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Cluster() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_cluster_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_clusters_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_clusters_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value 
in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class list_clusters_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(Cluster, Cluster.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype476, _size473) = iprot.readListBegin() + for _i477 in xrange(_size473): + _elem478 = Cluster() + _elem478.read(iprot) + self.success.append(_elem478) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('list_clusters_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter479 in self.success: + 
iter479.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class alter_cluster_args: + """ + Attributes: + - cluster_name + - cluster + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'cluster_name', None, None, ), # 1 + (2, TType.STRUCT, 'cluster', (Cluster, Cluster.thrift_spec), None, ), # 2 + ) + + def __init__(self, cluster_name=None, cluster=None,): + self.cluster_name = cluster_name + self.cluster = cluster + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.cluster_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.cluster = Cluster() + self.cluster.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('alter_cluster_args') + if self.cluster_name is not None: + oprot.writeFieldBegin('cluster_name', TType.STRING, 1) + oprot.writeString(self.cluster_name) + oprot.writeFieldEnd() + if self.cluster is not None: + oprot.writeFieldBegin('cluster', TType.STRUCT, 2) + self.cluster.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class alter_cluster_result: + """ + Attributes: + - o1 + - o2 + - o3 + - o4 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 4 + ) + + def __init__(self, o1=None, o2=None, o3=None, o4=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = 
InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = NoSuchObjectException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = AlreadyExistsException() + self.o4.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_cluster_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class create_role_args: """ Attributes: @@ -12923,10 +14490,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype444, _size441) = iprot.readListBegin() - for _i445 in xrange(_size441): - _elem446 = iprot.readString(); - self.success.append(_elem446) + (_etype483, _size480) = iprot.readListBegin() + for _i484 in xrange(_size480): + _elem485 = iprot.readString(); + 
self.success.append(_elem485) iprot.readListEnd() else: iprot.skip(ftype) @@ -12949,8 +14516,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter447 in self.success: - oprot.writeString(iter447) + for iter486 in self.success: + oprot.writeString(iter486) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13423,11 +14990,11 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype451, _size448) = iprot.readListBegin() - for _i452 in xrange(_size448): - _elem453 = Role() - _elem453.read(iprot) - self.success.append(_elem453) + (_etype490, _size487) = iprot.readListBegin() + for _i491 in xrange(_size487): + _elem492 = Role() + _elem492.read(iprot) + self.success.append(_elem492) iprot.readListEnd() else: iprot.skip(ftype) @@ -13450,8 +15017,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter454 in self.success: - iter454.write(oprot) + for iter493 in self.success: + iter493.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13519,10 +15086,10 @@ elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype458, _size455) = iprot.readListBegin() - for _i459 in xrange(_size455): - _elem460 = iprot.readString(); - self.group_names.append(_elem460) + (_etype497, _size494) = iprot.readListBegin() + for _i498 in xrange(_size494): + _elem499 = iprot.readString(); + self.group_names.append(_elem499) iprot.readListEnd() else: iprot.skip(ftype) @@ -13547,8 +15114,8 @@ if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter461 in self.group_names: - oprot.writeString(iter461) + for iter500 in self.group_names: + oprot.writeString(iter500) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13755,11 +15322,11 
@@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype465, _size462) = iprot.readListBegin() - for _i466 in xrange(_size462): - _elem467 = HiveObjectPrivilege() - _elem467.read(iprot) - self.success.append(_elem467) + (_etype504, _size501) = iprot.readListBegin() + for _i505 in xrange(_size501): + _elem506 = HiveObjectPrivilege() + _elem506.read(iprot) + self.success.append(_elem506) iprot.readListEnd() else: iprot.skip(ftype) @@ -13782,8 +15349,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter468 in self.success: - iter468.write(oprot) + for iter507 in self.success: + iter507.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14108,10 +15675,10 @@ elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype472, _size469) = iprot.readListBegin() - for _i473 in xrange(_size469): - _elem474 = iprot.readString(); - self.group_names.append(_elem474) + (_etype511, _size508) = iprot.readListBegin() + for _i512 in xrange(_size508): + _elem513 = iprot.readString(); + self.group_names.append(_elem513) iprot.readListEnd() else: iprot.skip(ftype) @@ -14132,8 +15699,8 @@ if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter475 in self.group_names: - oprot.writeString(iter475) + for iter514 in self.group_names: + oprot.writeString(iter514) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14182,10 +15749,10 @@ if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype479, _size476) = iprot.readListBegin() - for _i480 in xrange(_size476): - _elem481 = iprot.readString(); - self.success.append(_elem481) + (_etype518, _size515) = iprot.readListBegin() + for _i519 in xrange(_size515): + _elem520 = iprot.readString(); + self.success.append(_elem520) iprot.readListEnd() else: iprot.skip(ftype) @@ -14208,8 
+15775,8 @@ if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter482 in self.success: - oprot.writeString(iter482) + for iter521 in self.success: + oprot.writeString(iter521) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: Index: metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote =================================================================== --- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (revision 1235046) +++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote (working copy) @@ -35,6 +35,7 @@ print ' get_schema(string db_name, string table_name)' print ' void create_table(Table tbl)' print ' void drop_table(string dbname, string name, bool deleteData)' + print ' void drop_table_on_cluster(string cluster_name, string dbname, string name, bool deleteData)' print ' get_tables(string db_name, string pattern)' print ' get_all_tables(string db_name)' print ' Table get_table(string dbname, string tbl_name)' @@ -46,6 +47,7 @@ print ' Partition append_partition(string db_name, string tbl_name, part_vals)' print ' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)' print ' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)' + print ' bool drop_partition_on_cluster(string cluster_name, string db_name, string tbl_name, part_vals, bool deleteData)' print ' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)' print ' Partition get_partition(string db_name, string tbl_name, part_vals)' print ' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)' @@ -71,6 +73,11 @@ print ' Index get_index_by_name(string db_name, string tbl_name, string index_name)' print ' get_indexes(string db_name, string tbl_name, i16 max_indexes)' print ' 
get_index_names(string db_name, string tbl_name, i16 max_indexes)' + print ' void create_cluster(Cluster cluster)' + print ' void drop_cluster(string cluster_name)' + print ' Cluster get_cluster(string cluster_name)' + print ' list_clusters()' + print ' void alter_cluster(string cluster_name, Cluster cluster)' print ' bool create_role(Role role)' print ' bool drop_role(string role_name)' print ' get_role_names()' @@ -219,6 +226,12 @@ sys.exit(1) pp.pprint(client.drop_table(args[0],args[1],eval(args[2]),)) +elif cmd == 'drop_table_on_cluster': + if len(args) != 4: + print 'drop_table_on_cluster requires 4 args' + sys.exit(1) + pp.pprint(client.drop_table_on_cluster(args[0],args[1],args[2],eval(args[3]),)) + elif cmd == 'get_tables': if len(args) != 2: print 'get_tables requires 2 args' @@ -285,6 +298,12 @@ sys.exit(1) pp.pprint(client.drop_partition(args[0],args[1],eval(args[2]),eval(args[3]),)) +elif cmd == 'drop_partition_on_cluster': + if len(args) != 5: + print 'drop_partition_on_cluster requires 5 args' + sys.exit(1) + pp.pprint(client.drop_partition_on_cluster(args[0],args[1],args[2],eval(args[3]),eval(args[4]),)) + elif cmd == 'drop_partition_by_name': if len(args) != 4: print 'drop_partition_by_name requires 4 args' @@ -435,6 +454,36 @@ sys.exit(1) pp.pprint(client.get_index_names(args[0],args[1],eval(args[2]),)) +elif cmd == 'create_cluster': + if len(args) != 1: + print 'create_cluster requires 1 args' + sys.exit(1) + pp.pprint(client.create_cluster(eval(args[0]),)) + +elif cmd == 'drop_cluster': + if len(args) != 1: + print 'drop_cluster requires 1 args' + sys.exit(1) + pp.pprint(client.drop_cluster(args[0],)) + +elif cmd == 'get_cluster': + if len(args) != 1: + print 'get_cluster requires 1 args' + sys.exit(1) + pp.pprint(client.get_cluster(args[0],)) + +elif cmd == 'list_clusters': + if len(args) != 0: + print 'list_clusters requires 0 args' + sys.exit(1) + pp.pprint(client.list_clusters()) + +elif cmd == 'alter_cluster': + if len(args) != 2: + print 
'alter_cluster requires 2 args' + sys.exit(1) + pp.pprint(client.alter_cluster(args[0],eval(args[1]),)) + elif cmd == 'create_role': if len(args) != 1: print 'create_role requires 1 args' Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (revision 1235046) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp (working copy) @@ -715,14 +715,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size155; - ::apache::thrift::protocol::TType _etype158; - iprot->readListBegin(_etype158, _size155); - this->success.resize(_size155); - uint32_t _i159; - for (_i159 = 0; _i159 < _size155; ++_i159) + uint32_t _size177; + ::apache::thrift::protocol::TType _etype180; + iprot->readListBegin(_etype180, _size177); + this->success.resize(_size177); + uint32_t _i181; + for (_i181 = 0; _i181 < _size177; ++_i181) { - xfer += iprot->readString(this->success[_i159]); + xfer += iprot->readString(this->success[_i181]); } iprot->readListEnd(); } @@ -761,10 +761,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter160; - for (_iter160 = this->success.begin(); _iter160 != this->success.end(); ++_iter160) + std::vector ::const_iterator _iter182; + for (_iter182 = this->success.begin(); _iter182 != this->success.end(); ++_iter182) { - xfer += oprot->writeString((*_iter160)); + xfer += oprot->writeString((*_iter182)); } xfer += oprot->writeListEnd(); } @@ -803,14 +803,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size161; - ::apache::thrift::protocol::TType _etype164; - iprot->readListBegin(_etype164, _size161); - (*(this->success)).resize(_size161); - uint32_t _i165; - for (_i165 = 
0; _i165 < _size161; ++_i165) + uint32_t _size183; + ::apache::thrift::protocol::TType _etype186; + iprot->readListBegin(_etype186, _size183); + (*(this->success)).resize(_size183); + uint32_t _i187; + for (_i187 = 0; _i187 < _size183; ++_i187) { - xfer += iprot->readString((*(this->success))[_i165]); + xfer += iprot->readString((*(this->success))[_i187]); } iprot->readListEnd(); } @@ -911,14 +911,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size166; - ::apache::thrift::protocol::TType _etype169; - iprot->readListBegin(_etype169, _size166); - this->success.resize(_size166); - uint32_t _i170; - for (_i170 = 0; _i170 < _size166; ++_i170) + uint32_t _size188; + ::apache::thrift::protocol::TType _etype191; + iprot->readListBegin(_etype191, _size188); + this->success.resize(_size188); + uint32_t _i192; + for (_i192 = 0; _i192 < _size188; ++_i192) { - xfer += iprot->readString(this->success[_i170]); + xfer += iprot->readString(this->success[_i192]); } iprot->readListEnd(); } @@ -957,10 +957,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter171; - for (_iter171 = this->success.begin(); _iter171 != this->success.end(); ++_iter171) + std::vector ::const_iterator _iter193; + for (_iter193 = this->success.begin(); _iter193 != this->success.end(); ++_iter193) { - xfer += oprot->writeString((*_iter171)); + xfer += oprot->writeString((*_iter193)); } xfer += oprot->writeListEnd(); } @@ -999,14 +999,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size172; - ::apache::thrift::protocol::TType _etype175; - iprot->readListBegin(_etype175, _size172); - (*(this->success)).resize(_size172); - uint32_t _i176; - for (_i176 = 0; _i176 < _size172; ++_i176) + uint32_t _size194; + ::apache::thrift::protocol::TType 
_etype197; + iprot->readListBegin(_etype197, _size194); + (*(this->success)).resize(_size194); + uint32_t _i198; + for (_i198 = 0; _i198 < _size194; ++_i198) { - xfer += iprot->readString((*(this->success))[_i176]); + xfer += iprot->readString((*(this->success))[_i198]); } iprot->readListEnd(); } @@ -1927,17 +1927,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size177; - ::apache::thrift::protocol::TType _ktype178; - ::apache::thrift::protocol::TType _vtype179; - iprot->readMapBegin(_ktype178, _vtype179, _size177); - uint32_t _i181; - for (_i181 = 0; _i181 < _size177; ++_i181) + uint32_t _size199; + ::apache::thrift::protocol::TType _ktype200; + ::apache::thrift::protocol::TType _vtype201; + iprot->readMapBegin(_ktype200, _vtype201, _size199); + uint32_t _i203; + for (_i203 = 0; _i203 < _size199; ++_i203) { - std::string _key182; - xfer += iprot->readString(_key182); - Type& _val183 = this->success[_key182]; - xfer += _val183.read(iprot); + std::string _key204; + xfer += iprot->readString(_key204); + Type& _val205 = this->success[_key204]; + xfer += _val205.read(iprot); } iprot->readMapEnd(); } @@ -1976,11 +1976,11 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter184; - for (_iter184 = this->success.begin(); _iter184 != this->success.end(); ++_iter184) + std::map ::const_iterator _iter206; + for (_iter206 = this->success.begin(); _iter206 != this->success.end(); ++_iter206) { - xfer += oprot->writeString(_iter184->first); - xfer += _iter184->second.write(oprot); + xfer += oprot->writeString(_iter206->first); + xfer += _iter206->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2019,17 +2019,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size185; - 
::apache::thrift::protocol::TType _ktype186; - ::apache::thrift::protocol::TType _vtype187; - iprot->readMapBegin(_ktype186, _vtype187, _size185); - uint32_t _i189; - for (_i189 = 0; _i189 < _size185; ++_i189) + uint32_t _size207; + ::apache::thrift::protocol::TType _ktype208; + ::apache::thrift::protocol::TType _vtype209; + iprot->readMapBegin(_ktype208, _vtype209, _size207); + uint32_t _i211; + for (_i211 = 0; _i211 < _size207; ++_i211) { - std::string _key190; - xfer += iprot->readString(_key190); - Type& _val191 = (*(this->success))[_key190]; - xfer += _val191.read(iprot); + std::string _key212; + xfer += iprot->readString(_key212); + Type& _val213 = (*(this->success))[_key212]; + xfer += _val213.read(iprot); } iprot->readMapEnd(); } @@ -2158,14 +2158,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size192; - ::apache::thrift::protocol::TType _etype195; - iprot->readListBegin(_etype195, _size192); - this->success.resize(_size192); - uint32_t _i196; - for (_i196 = 0; _i196 < _size192; ++_i196) + uint32_t _size214; + ::apache::thrift::protocol::TType _etype217; + iprot->readListBegin(_etype217, _size214); + this->success.resize(_size214); + uint32_t _i218; + for (_i218 = 0; _i218 < _size214; ++_i218) { - xfer += this->success[_i196].read(iprot); + xfer += this->success[_i218].read(iprot); } iprot->readListEnd(); } @@ -2220,10 +2220,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter197; - for (_iter197 = this->success.begin(); _iter197 != this->success.end(); ++_iter197) + std::vector ::const_iterator _iter219; + for (_iter219 = this->success.begin(); _iter219 != this->success.end(); ++_iter219) { - xfer += (*_iter197).write(oprot); + xfer += (*_iter219).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2270,14 +2270,14 @@ if (ftype == 
::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size198; - ::apache::thrift::protocol::TType _etype201; - iprot->readListBegin(_etype201, _size198); - (*(this->success)).resize(_size198); - uint32_t _i202; - for (_i202 = 0; _i202 < _size198; ++_i202) + uint32_t _size220; + ::apache::thrift::protocol::TType _etype223; + iprot->readListBegin(_etype223, _size220); + (*(this->success)).resize(_size220); + uint32_t _i224; + for (_i224 = 0; _i224 < _size220; ++_i224) { - xfer += (*(this->success))[_i202].read(iprot); + xfer += (*(this->success))[_i224].read(iprot); } iprot->readListEnd(); } @@ -2422,14 +2422,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size203; - ::apache::thrift::protocol::TType _etype206; - iprot->readListBegin(_etype206, _size203); - this->success.resize(_size203); - uint32_t _i207; - for (_i207 = 0; _i207 < _size203; ++_i207) + uint32_t _size225; + ::apache::thrift::protocol::TType _etype228; + iprot->readListBegin(_etype228, _size225); + this->success.resize(_size225); + uint32_t _i229; + for (_i229 = 0; _i229 < _size225; ++_i229) { - xfer += this->success[_i207].read(iprot); + xfer += this->success[_i229].read(iprot); } iprot->readListEnd(); } @@ -2484,10 +2484,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter208; - for (_iter208 = this->success.begin(); _iter208 != this->success.end(); ++_iter208) + std::vector ::const_iterator _iter230; + for (_iter230 = this->success.begin(); _iter230 != this->success.end(); ++_iter230) { - xfer += (*_iter208).write(oprot); + xfer += (*_iter230).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2534,14 +2534,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size209; - ::apache::thrift::protocol::TType 
_etype212; - iprot->readListBegin(_etype212, _size209); - (*(this->success)).resize(_size209); - uint32_t _i213; - for (_i213 = 0; _i213 < _size209; ++_i213) + uint32_t _size231; + ::apache::thrift::protocol::TType _etype234; + iprot->readListBegin(_etype234, _size231); + (*(this->success)).resize(_size231); + uint32_t _i235; + for (_i235 = 0; _i235 < _size231; ++_i235) { - xfer += (*(this->success))[_i213].read(iprot); + xfer += (*(this->success))[_i235].read(iprot); } iprot->readListEnd(); } @@ -3010,6 +3010,246 @@ return xfer; } +uint32_t ThriftHiveMetastore_drop_table_on_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cluster_name); + this->__isset.cluster_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbname); + this->__isset.dbname = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->deleteData); + this->__isset.deleteData = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
ThriftHiveMetastore_drop_table_on_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_on_cluster_args"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->cluster_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->dbname); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->deleteData); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_table_on_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_on_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->cluster_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool((*(this->deleteData))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t 
ThriftHiveMetastore_drop_table_on_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_table_on_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_on_cluster_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + 
return xfer; +} + +uint32_t ThriftHiveMetastore_drop_table_on_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t ThriftHiveMetastore_get_tables_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -3110,14 +3350,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size214; - ::apache::thrift::protocol::TType _etype217; - iprot->readListBegin(_etype217, _size214); - this->success.resize(_size214); - uint32_t _i218; - for (_i218 = 0; _i218 < _size214; ++_i218) + uint32_t _size236; + ::apache::thrift::protocol::TType _etype239; + iprot->readListBegin(_etype239, _size236); + this->success.resize(_size236); + uint32_t _i240; + for (_i240 = 0; _i240 < _size236; ++_i240) { - xfer += iprot->readString(this->success[_i218]); + xfer += iprot->readString(this->success[_i240]); } iprot->readListEnd(); } @@ -3156,10 +3396,10 @@ xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter219; - for (_iter219 = this->success.begin(); _iter219 != this->success.end(); ++_iter219) + std::vector ::const_iterator _iter241; + for (_iter241 = this->success.begin(); _iter241 != this->success.end(); ++_iter241) { - xfer += oprot->writeString((*_iter219)); + xfer += oprot->writeString((*_iter241)); } xfer += oprot->writeListEnd(); } @@ -3198,14 +3438,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size220; - ::apache::thrift::protocol::TType _etype223; - iprot->readListBegin(_etype223, _size220); - (*(this->success)).resize(_size220); - uint32_t _i224; - for (_i224 = 0; _i224 < _size220; ++_i224) + uint32_t _size242; + ::apache::thrift::protocol::TType _etype245; + iprot->readListBegin(_etype245, _size242); + (*(this->success)).resize(_size242); + uint32_t _i246; + for (_i246 = 0; _i246 < _size242; ++_i246) { - xfer += iprot->readString((*(this->success))[_i224]); + xfer += iprot->readString((*(this->success))[_i246]); } iprot->readListEnd(); } @@ -3320,14 +3560,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size225; - ::apache::thrift::protocol::TType _etype228; - iprot->readListBegin(_etype228, _size225); - this->success.resize(_size225); - uint32_t _i229; - for (_i229 = 0; _i229 < _size225; ++_i229) + uint32_t _size247; + ::apache::thrift::protocol::TType _etype250; + iprot->readListBegin(_etype250, _size247); + this->success.resize(_size247); + uint32_t _i251; + for (_i251 = 0; _i251 < _size247; ++_i251) { - xfer += iprot->readString(this->success[_i229]); + xfer += iprot->readString(this->success[_i251]); } iprot->readListEnd(); } @@ -3366,10 +3606,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter230; - for (_iter230 = this->success.begin(); _iter230 != this->success.end(); ++_iter230) + std::vector ::const_iterator _iter252; + for (_iter252 = this->success.begin(); _iter252 != this->success.end(); ++_iter252) { - xfer += oprot->writeString((*_iter230)); + xfer += oprot->writeString((*_iter252)); } xfer += oprot->writeListEnd(); } @@ -3408,14 +3648,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size231; - ::apache::thrift::protocol::TType _etype234; - iprot->readListBegin(_etype234, _size231); - (*(this->success)).resize(_size231); - uint32_t _i235; - for (_i235 = 0; _i235 < _size231; ++_i235) + uint32_t _size253; + ::apache::thrift::protocol::TType _etype256; + iprot->readListBegin(_etype256, _size253); + (*(this->success)).resize(_size253); + uint32_t _i257; + for (_i257 = 0; _i257 < _size253; ++_i257) { - xfer += iprot->readString((*(this->success))[_i235]); + xfer += iprot->readString((*(this->success))[_i257]); } iprot->readListEnd(); } @@ -3688,14 +3928,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size236; - ::apache::thrift::protocol::TType _etype239; - iprot->readListBegin(_etype239, _size236); - this->tbl_names.resize(_size236); - uint32_t _i240; - for (_i240 = 0; _i240 < _size236; ++_i240) + uint32_t _size258; + ::apache::thrift::protocol::TType _etype261; + iprot->readListBegin(_etype261, _size258); + this->tbl_names.resize(_size258); + uint32_t _i262; + for (_i262 = 0; _i262 < _size258; ++_i262) { - xfer += iprot->readString(this->tbl_names[_i240]); + xfer += iprot->readString(this->tbl_names[_i262]); } iprot->readListEnd(); } @@ -3725,10 +3965,10 @@ xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter241; - for (_iter241 = this->tbl_names.begin(); _iter241 != this->tbl_names.end(); ++_iter241) + std::vector ::const_iterator _iter263; + for (_iter263 = this->tbl_names.begin(); _iter263 != this->tbl_names.end(); ++_iter263) { - xfer += oprot->writeString((*_iter241)); + xfer += oprot->writeString((*_iter263)); } xfer += oprot->writeListEnd(); } @@ -3747,10 +3987,10 @@ xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter242; - for (_iter242 = (*(this->tbl_names)).begin(); _iter242 != (*(this->tbl_names)).end(); ++_iter242) + std::vector ::const_iterator _iter264; + for (_iter264 = (*(this->tbl_names)).begin(); _iter264 != (*(this->tbl_names)).end(); ++_iter264) { - xfer += oprot->writeString((*_iter242)); + xfer += oprot->writeString((*_iter264)); } xfer += oprot->writeListEnd(); } @@ -3784,14 +4024,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size243; - ::apache::thrift::protocol::TType _etype246; - iprot->readListBegin(_etype246, _size243); - this->success.resize(_size243); - uint32_t _i247; - for (_i247 = 0; _i247 < _size243; ++_i247) + uint32_t _size265; + ::apache::thrift::protocol::TType _etype268; + iprot->readListBegin(_etype268, _size265); + this->success.resize(_size265); + uint32_t _i269; + for (_i269 = 0; _i269 < _size265; ++_i269) { - xfer += this->success[_i247].read(iprot); + xfer += this->success[_i269].read(iprot); } iprot->readListEnd(); } @@ -3846,10 +4086,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter248; - for (_iter248 = this->success.begin(); _iter248 != 
this->success.end(); ++_iter248) + std::vector
::const_iterator _iter270; + for (_iter270 = this->success.begin(); _iter270 != this->success.end(); ++_iter270) { - xfer += (*_iter248).write(oprot); + xfer += (*_iter270).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3896,14 +4136,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size249; - ::apache::thrift::protocol::TType _etype252; - iprot->readListBegin(_etype252, _size249); - (*(this->success)).resize(_size249); - uint32_t _i253; - for (_i253 = 0; _i253 < _size249; ++_i253) + uint32_t _size271; + ::apache::thrift::protocol::TType _etype274; + iprot->readListBegin(_etype274, _size271); + (*(this->success)).resize(_size271); + uint32_t _i275; + for (_i275 = 0; _i275 < _size271; ++_i275) { - xfer += (*(this->success))[_i253].read(iprot); + xfer += (*(this->success))[_i275].read(iprot); } iprot->readListEnd(); } @@ -4062,14 +4302,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size254; - ::apache::thrift::protocol::TType _etype257; - iprot->readListBegin(_etype257, _size254); - this->success.resize(_size254); - uint32_t _i258; - for (_i258 = 0; _i258 < _size254; ++_i258) + uint32_t _size276; + ::apache::thrift::protocol::TType _etype279; + iprot->readListBegin(_etype279, _size276); + this->success.resize(_size276); + uint32_t _i280; + for (_i280 = 0; _i280 < _size276; ++_i280) { - xfer += iprot->readString(this->success[_i258]); + xfer += iprot->readString(this->success[_i280]); } iprot->readListEnd(); } @@ -4124,10 +4364,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter259; - for (_iter259 = this->success.begin(); _iter259 != this->success.end(); ++_iter259) + std::vector ::const_iterator _iter281; + for (_iter281 = this->success.begin(); _iter281 != this->success.end(); ++_iter281) 
{ - xfer += oprot->writeString((*_iter259)); + xfer += oprot->writeString((*_iter281)); } xfer += oprot->writeListEnd(); } @@ -4174,14 +4414,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size260; - ::apache::thrift::protocol::TType _etype263; - iprot->readListBegin(_etype263, _size260); - (*(this->success)).resize(_size260); - uint32_t _i264; - for (_i264 = 0; _i264 < _size260; ++_i264) + uint32_t _size282; + ::apache::thrift::protocol::TType _etype285; + iprot->readListBegin(_etype285, _size282); + (*(this->success)).resize(_size282); + uint32_t _i286; + for (_i286 = 0; _i286 < _size282; ++_i286) { - xfer += iprot->readString((*(this->success))[_i264]); + xfer += iprot->readString((*(this->success))[_i286]); } iprot->readListEnd(); } @@ -4674,14 +4914,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size265; - ::apache::thrift::protocol::TType _etype268; - iprot->readListBegin(_etype268, _size265); - this->new_parts.resize(_size265); - uint32_t _i269; - for (_i269 = 0; _i269 < _size265; ++_i269) + uint32_t _size287; + ::apache::thrift::protocol::TType _etype290; + iprot->readListBegin(_etype290, _size287); + this->new_parts.resize(_size287); + uint32_t _i291; + for (_i291 = 0; _i291 < _size287; ++_i291) { - xfer += this->new_parts[_i269].read(iprot); + xfer += this->new_parts[_i291].read(iprot); } iprot->readListEnd(); } @@ -4708,10 +4948,10 @@ xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter270; - for (_iter270 = this->new_parts.begin(); _iter270 != this->new_parts.end(); ++_iter270) + std::vector ::const_iterator _iter292; + for (_iter292 = this->new_parts.begin(); _iter292 != this->new_parts.end(); ++_iter292) { - xfer += (*_iter270).write(oprot); + xfer += (*_iter292).write(oprot); } 
xfer += oprot->writeListEnd(); } @@ -4727,10 +4967,10 @@ xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter271; - for (_iter271 = (*(this->new_parts)).begin(); _iter271 != (*(this->new_parts)).end(); ++_iter271) + std::vector ::const_iterator _iter293; + for (_iter293 = (*(this->new_parts)).begin(); _iter293 != (*(this->new_parts)).end(); ++_iter293) { - xfer += (*_iter271).write(oprot); + xfer += (*_iter293).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4936,14 +5176,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size272; - ::apache::thrift::protocol::TType _etype275; - iprot->readListBegin(_etype275, _size272); - this->part_vals.resize(_size272); - uint32_t _i276; - for (_i276 = 0; _i276 < _size272; ++_i276) + uint32_t _size294; + ::apache::thrift::protocol::TType _etype297; + iprot->readListBegin(_etype297, _size294); + this->part_vals.resize(_size294); + uint32_t _i298; + for (_i298 = 0; _i298 < _size294; ++_i298) { - xfer += iprot->readString(this->part_vals[_i276]); + xfer += iprot->readString(this->part_vals[_i298]); } iprot->readListEnd(); } @@ -4976,10 +5216,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter277; - for (_iter277 = this->part_vals.begin(); _iter277 != this->part_vals.end(); ++_iter277) + std::vector ::const_iterator _iter299; + for (_iter299 = this->part_vals.begin(); _iter299 != this->part_vals.end(); ++_iter299) { - xfer += oprot->writeString((*_iter277)); + xfer += oprot->writeString((*_iter299)); } xfer += oprot->writeListEnd(); } @@ -5001,10 +5241,10 @@ xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter278; - for (_iter278 = (*(this->part_vals)).begin(); _iter278 != (*(this->part_vals)).end(); ++_iter278) + std::vector ::const_iterator _iter300; + for (_iter300 = (*(this->part_vals)).begin(); _iter300 != (*(this->part_vals)).end(); ++_iter300) { - xfer += oprot->writeString((*_iter278)); + xfer += oprot->writeString((*_iter300)); } xfer += oprot->writeListEnd(); } @@ -5456,14 +5696,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size279; - ::apache::thrift::protocol::TType _etype282; - iprot->readListBegin(_etype282, _size279); - this->part_vals.resize(_size279); - uint32_t _i283; - for (_i283 = 0; _i283 < _size279; ++_i283) + uint32_t _size301; + ::apache::thrift::protocol::TType _etype304; + iprot->readListBegin(_etype304, _size301); + this->part_vals.resize(_size301); + uint32_t _i305; + for (_i305 = 0; _i305 < _size301; ++_i305) { - xfer += iprot->readString(this->part_vals[_i283]); + xfer += iprot->readString(this->part_vals[_i305]); } iprot->readListEnd(); } @@ -5504,10 +5744,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter284; - for (_iter284 = this->part_vals.begin(); _iter284 != this->part_vals.end(); ++_iter284) + std::vector ::const_iterator _iter306; + for (_iter306 = this->part_vals.begin(); _iter306 != this->part_vals.end(); ++_iter306) { - xfer += oprot->writeString((*_iter284)); + xfer += oprot->writeString((*_iter306)); } xfer += oprot->writeListEnd(); } @@ -5532,10 +5772,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter285; - for (_iter285 = (*(this->part_vals)).begin(); _iter285 != (*(this->part_vals)).end(); ++_iter285) + std::vector ::const_iterator _iter307; + for (_iter307 = (*(this->part_vals)).begin(); _iter307 != (*(this->part_vals)).end(); ++_iter307) { - xfer += oprot->writeString((*_iter285)); + xfer += oprot->writeString((*_iter307)); } xfer += oprot->writeListEnd(); } @@ -5684,6 +5924,308 @@ return xfer; } +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cluster_name); + this->__isset.cluster_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size308; + ::apache::thrift::protocol::TType _etype311; + iprot->readListBegin(_etype311, _size308); + this->part_vals.resize(_size308); + uint32_t _i312; + for (_i312 = 0; _i312 < _size308; ++_i312) + { + xfer += iprot->readString(this->part_vals[_i312]); + } + iprot->readListEnd(); + } + this->__isset.part_vals = true; + } else 
{ + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->deleteData); + this->__isset.deleteData = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_on_cluster_args"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->cluster_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 4); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); + std::vector ::const_iterator _iter313; + for (_iter313 = this->part_vals.begin(); _iter313 != this->part_vals.end(); ++_iter313) + { + xfer += oprot->writeString((*_iter313)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 5); + xfer += oprot->writeBool(this->deleteData); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_on_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->cluster_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 4); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); + std::vector ::const_iterator _iter314; + for (_iter314 = (*(this->part_vals)).begin(); _iter314 != (*(this->part_vals)).end(); ++_iter314) + { + xfer += oprot->writeString((*_iter314)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 5); + xfer += oprot->writeBool((*(this->deleteData))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + 
break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_on_cluster_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_partition_on_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += 
iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t ThriftHiveMetastore_drop_partition_by_name_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -5964,14 +6506,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size286; - ::apache::thrift::protocol::TType _etype289; - iprot->readListBegin(_etype289, _size286); - this->part_vals.resize(_size286); - uint32_t _i290; - for (_i290 = 0; _i290 < _size286; ++_i290) + uint32_t _size315; + ::apache::thrift::protocol::TType _etype318; + iprot->readListBegin(_etype318, _size315); + this->part_vals.resize(_size315); + uint32_t _i319; + for (_i319 = 0; _i319 < _size315; ++_i319) { - xfer += iprot->readString(this->part_vals[_i290]); + xfer += iprot->readString(this->part_vals[_i319]); } iprot->readListEnd(); } @@ -6004,10 +6546,10 @@ xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter291; - for (_iter291 = this->part_vals.begin(); _iter291 != this->part_vals.end(); ++_iter291) + std::vector ::const_iterator _iter320; + for (_iter320 = this->part_vals.begin(); _iter320 != this->part_vals.end(); ++_iter320) { - xfer += oprot->writeString((*_iter291)); + xfer += oprot->writeString((*_iter320)); } xfer += oprot->writeListEnd(); } @@ -6029,10 +6571,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter292; - for (_iter292 = (*(this->part_vals)).begin(); _iter292 != (*(this->part_vals)).end(); ++_iter292) + std::vector ::const_iterator _iter321; + for (_iter321 = (*(this->part_vals)).begin(); _iter321 != (*(this->part_vals)).end(); ++_iter321) { - xfer += oprot->writeString((*_iter292)); + xfer += oprot->writeString((*_iter321)); } xfer += oprot->writeListEnd(); } @@ -6218,14 +6760,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size293; - ::apache::thrift::protocol::TType _etype296; - iprot->readListBegin(_etype296, _size293); - this->part_vals.resize(_size293); - uint32_t _i297; - for (_i297 = 0; _i297 < _size293; ++_i297) + uint32_t _size322; + ::apache::thrift::protocol::TType _etype325; + iprot->readListBegin(_etype325, _size322); + this->part_vals.resize(_size322); + uint32_t _i326; + for (_i326 = 0; _i326 < _size322; ++_i326) { - xfer += iprot->readString(this->part_vals[_i297]); + xfer += iprot->readString(this->part_vals[_i326]); } iprot->readListEnd(); } @@ -6246,14 +6788,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size298; - ::apache::thrift::protocol::TType _etype301; - 
iprot->readListBegin(_etype301, _size298); - this->group_names.resize(_size298); - uint32_t _i302; - for (_i302 = 0; _i302 < _size298; ++_i302) + uint32_t _size327; + ::apache::thrift::protocol::TType _etype330; + iprot->readListBegin(_etype330, _size327); + this->group_names.resize(_size327); + uint32_t _i331; + for (_i331 = 0; _i331 < _size327; ++_i331) { - xfer += iprot->readString(this->group_names[_i302]); + xfer += iprot->readString(this->group_names[_i331]); } iprot->readListEnd(); } @@ -6286,10 +6828,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter303; - for (_iter303 = this->part_vals.begin(); _iter303 != this->part_vals.end(); ++_iter303) + std::vector ::const_iterator _iter332; + for (_iter332 = this->part_vals.begin(); _iter332 != this->part_vals.end(); ++_iter332) { - xfer += oprot->writeString((*_iter303)); + xfer += oprot->writeString((*_iter332)); } xfer += oprot->writeListEnd(); } @@ -6300,10 +6842,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter304; - for (_iter304 = this->group_names.begin(); _iter304 != this->group_names.end(); ++_iter304) + std::vector ::const_iterator _iter333; + for (_iter333 = this->group_names.begin(); _iter333 != this->group_names.end(); ++_iter333) { - xfer += oprot->writeString((*_iter304)); + xfer += oprot->writeString((*_iter333)); } xfer += oprot->writeListEnd(); } @@ -6325,10 +6867,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter305; - for (_iter305 = 
(*(this->part_vals)).begin(); _iter305 != (*(this->part_vals)).end(); ++_iter305) + std::vector ::const_iterator _iter334; + for (_iter334 = (*(this->part_vals)).begin(); _iter334 != (*(this->part_vals)).end(); ++_iter334) { - xfer += oprot->writeString((*_iter305)); + xfer += oprot->writeString((*_iter334)); } xfer += oprot->writeListEnd(); } @@ -6339,10 +6881,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter306; - for (_iter306 = (*(this->group_names)).begin(); _iter306 != (*(this->group_names)).end(); ++_iter306) + std::vector ::const_iterator _iter335; + for (_iter335 = (*(this->group_names)).begin(); _iter335 != (*(this->group_names)).end(); ++_iter335) { - xfer += oprot->writeString((*_iter306)); + xfer += oprot->writeString((*_iter335)); } xfer += oprot->writeListEnd(); } @@ -6828,14 +7370,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size307; - ::apache::thrift::protocol::TType _etype310; - iprot->readListBegin(_etype310, _size307); - this->success.resize(_size307); - uint32_t _i311; - for (_i311 = 0; _i311 < _size307; ++_i311) + uint32_t _size336; + ::apache::thrift::protocol::TType _etype339; + iprot->readListBegin(_etype339, _size336); + this->success.resize(_size336); + uint32_t _i340; + for (_i340 = 0; _i340 < _size336; ++_i340) { - xfer += this->success[_i311].read(iprot); + xfer += this->success[_i340].read(iprot); } iprot->readListEnd(); } @@ -6882,10 +7424,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter312; - for (_iter312 = this->success.begin(); _iter312 != this->success.end(); ++_iter312) + std::vector ::const_iterator _iter341; 
+ for (_iter341 = this->success.begin(); _iter341 != this->success.end(); ++_iter341) { - xfer += (*_iter312).write(oprot); + xfer += (*_iter341).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6928,14 +7470,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size313; - ::apache::thrift::protocol::TType _etype316; - iprot->readListBegin(_etype316, _size313); - (*(this->success)).resize(_size313); - uint32_t _i317; - for (_i317 = 0; _i317 < _size313; ++_i317) + uint32_t _size342; + ::apache::thrift::protocol::TType _etype345; + iprot->readListBegin(_etype345, _size342); + (*(this->success)).resize(_size342); + uint32_t _i346; + for (_i346 = 0; _i346 < _size342; ++_i346) { - xfer += (*(this->success))[_i317].read(iprot); + xfer += (*(this->success))[_i346].read(iprot); } iprot->readListEnd(); } @@ -7028,14 +7570,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size318; - ::apache::thrift::protocol::TType _etype321; - iprot->readListBegin(_etype321, _size318); - this->group_names.resize(_size318); - uint32_t _i322; - for (_i322 = 0; _i322 < _size318; ++_i322) + uint32_t _size347; + ::apache::thrift::protocol::TType _etype350; + iprot->readListBegin(_etype350, _size347); + this->group_names.resize(_size347); + uint32_t _i351; + for (_i351 = 0; _i351 < _size347; ++_i351) { - xfer += iprot->readString(this->group_names[_i322]); + xfer += iprot->readString(this->group_names[_i351]); } iprot->readListEnd(); } @@ -7074,10 +7616,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter323; - for (_iter323 = this->group_names.begin(); _iter323 != this->group_names.end(); ++_iter323) + std::vector ::const_iterator _iter352; + for (_iter352 = this->group_names.begin(); _iter352 != 
this->group_names.end(); ++_iter352) { - xfer += oprot->writeString((*_iter323)); + xfer += oprot->writeString((*_iter352)); } xfer += oprot->writeListEnd(); } @@ -7105,10 +7647,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter324; - for (_iter324 = (*(this->group_names)).begin(); _iter324 != (*(this->group_names)).end(); ++_iter324) + std::vector ::const_iterator _iter353; + for (_iter353 = (*(this->group_names)).begin(); _iter353 != (*(this->group_names)).end(); ++_iter353) { - xfer += oprot->writeString((*_iter324)); + xfer += oprot->writeString((*_iter353)); } xfer += oprot->writeListEnd(); } @@ -7142,14 +7684,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size325; - ::apache::thrift::protocol::TType _etype328; - iprot->readListBegin(_etype328, _size325); - this->success.resize(_size325); - uint32_t _i329; - for (_i329 = 0; _i329 < _size325; ++_i329) + uint32_t _size354; + ::apache::thrift::protocol::TType _etype357; + iprot->readListBegin(_etype357, _size354); + this->success.resize(_size354); + uint32_t _i358; + for (_i358 = 0; _i358 < _size354; ++_i358) { - xfer += this->success[_i329].read(iprot); + xfer += this->success[_i358].read(iprot); } iprot->readListEnd(); } @@ -7196,10 +7738,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter330; - for (_iter330 = this->success.begin(); _iter330 != this->success.end(); ++_iter330) + std::vector ::const_iterator _iter359; + for (_iter359 = this->success.begin(); _iter359 != this->success.end(); ++_iter359) { - xfer += (*_iter330).write(oprot); + xfer += (*_iter359).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -7242,14 +7784,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size331; - ::apache::thrift::protocol::TType _etype334; - iprot->readListBegin(_etype334, _size331); - (*(this->success)).resize(_size331); - uint32_t _i335; - for (_i335 = 0; _i335 < _size331; ++_i335) + uint32_t _size360; + ::apache::thrift::protocol::TType _etype363; + iprot->readListBegin(_etype363, _size360); + (*(this->success)).resize(_size360); + uint32_t _i364; + for (_i364 = 0; _i364 < _size360; ++_i364) { - xfer += (*(this->success))[_i335].read(iprot); + xfer += (*(this->success))[_i364].read(iprot); } iprot->readListEnd(); } @@ -7400,14 +7942,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size336; - ::apache::thrift::protocol::TType _etype339; - iprot->readListBegin(_etype339, _size336); - this->success.resize(_size336); - uint32_t _i340; - for (_i340 = 0; _i340 < _size336; ++_i340) + uint32_t _size365; + ::apache::thrift::protocol::TType _etype368; + iprot->readListBegin(_etype368, _size365); + this->success.resize(_size365); + uint32_t _i369; + for (_i369 = 0; _i369 < _size365; ++_i369) { - xfer += iprot->readString(this->success[_i340]); + xfer += iprot->readString(this->success[_i369]); } iprot->readListEnd(); } @@ -7446,10 +7988,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter341; - for (_iter341 = this->success.begin(); _iter341 != this->success.end(); ++_iter341) + std::vector ::const_iterator _iter370; + for (_iter370 = this->success.begin(); _iter370 != this->success.end(); ++_iter370) { - xfer += oprot->writeString((*_iter341)); + xfer += oprot->writeString((*_iter370)); } xfer += oprot->writeListEnd(); } @@ -7488,14 +8030,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { 
{ (*(this->success)).clear(); - uint32_t _size342; - ::apache::thrift::protocol::TType _etype345; - iprot->readListBegin(_etype345, _size342); - (*(this->success)).resize(_size342); - uint32_t _i346; - for (_i346 = 0; _i346 < _size342; ++_i346) + uint32_t _size371; + ::apache::thrift::protocol::TType _etype374; + iprot->readListBegin(_etype374, _size371); + (*(this->success)).resize(_size371); + uint32_t _i375; + for (_i375 = 0; _i375 < _size371; ++_i375) { - xfer += iprot->readString((*(this->success))[_i346]); + xfer += iprot->readString((*(this->success))[_i375]); } iprot->readListEnd(); } @@ -7564,14 +8106,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size347; - ::apache::thrift::protocol::TType _etype350; - iprot->readListBegin(_etype350, _size347); - this->part_vals.resize(_size347); - uint32_t _i351; - for (_i351 = 0; _i351 < _size347; ++_i351) + uint32_t _size376; + ::apache::thrift::protocol::TType _etype379; + iprot->readListBegin(_etype379, _size376); + this->part_vals.resize(_size376); + uint32_t _i380; + for (_i380 = 0; _i380 < _size376; ++_i380) { - xfer += iprot->readString(this->part_vals[_i351]); + xfer += iprot->readString(this->part_vals[_i380]); } iprot->readListEnd(); } @@ -7612,10 +8154,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter352; - for (_iter352 = this->part_vals.begin(); _iter352 != this->part_vals.end(); ++_iter352) + std::vector ::const_iterator _iter381; + for (_iter381 = this->part_vals.begin(); _iter381 != this->part_vals.end(); ++_iter381) { - xfer += oprot->writeString((*_iter352)); + xfer += oprot->writeString((*_iter381)); } xfer += oprot->writeListEnd(); } @@ -7640,10 +8182,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter353; - for (_iter353 = (*(this->part_vals)).begin(); _iter353 != (*(this->part_vals)).end(); ++_iter353) + std::vector ::const_iterator _iter382; + for (_iter382 = (*(this->part_vals)).begin(); _iter382 != (*(this->part_vals)).end(); ++_iter382) { - xfer += oprot->writeString((*_iter353)); + xfer += oprot->writeString((*_iter382)); } xfer += oprot->writeListEnd(); } @@ -7680,14 +8222,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size354; - ::apache::thrift::protocol::TType _etype357; - iprot->readListBegin(_etype357, _size354); - this->success.resize(_size354); - uint32_t _i358; - for (_i358 = 0; _i358 < _size354; ++_i358) + uint32_t _size383; + ::apache::thrift::protocol::TType _etype386; + iprot->readListBegin(_etype386, _size383); + this->success.resize(_size383); + uint32_t _i387; + for (_i387 = 0; _i387 < _size383; ++_i387) { - xfer += this->success[_i358].read(iprot); + xfer += this->success[_i387].read(iprot); } iprot->readListEnd(); } @@ -7734,10 +8276,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter359; - for (_iter359 = this->success.begin(); _iter359 != this->success.end(); ++_iter359) + std::vector ::const_iterator _iter388; + for (_iter388 = this->success.begin(); _iter388 != this->success.end(); ++_iter388) { - xfer += (*_iter359).write(oprot); + xfer += (*_iter388).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7780,14 +8322,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size360; - ::apache::thrift::protocol::TType _etype363; - iprot->readListBegin(_etype363, _size360); - (*(this->success)).resize(_size360); - uint32_t _i364; - for 
(_i364 = 0; _i364 < _size360; ++_i364) + uint32_t _size389; + ::apache::thrift::protocol::TType _etype392; + iprot->readListBegin(_etype392, _size389); + (*(this->success)).resize(_size389); + uint32_t _i393; + for (_i393 = 0; _i393 < _size389; ++_i393) { - xfer += (*(this->success))[_i364].read(iprot); + xfer += (*(this->success))[_i393].read(iprot); } iprot->readListEnd(); } @@ -7864,14 +8406,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size365; - ::apache::thrift::protocol::TType _etype368; - iprot->readListBegin(_etype368, _size365); - this->part_vals.resize(_size365); - uint32_t _i369; - for (_i369 = 0; _i369 < _size365; ++_i369) + uint32_t _size394; + ::apache::thrift::protocol::TType _etype397; + iprot->readListBegin(_etype397, _size394); + this->part_vals.resize(_size394); + uint32_t _i398; + for (_i398 = 0; _i398 < _size394; ++_i398) { - xfer += iprot->readString(this->part_vals[_i369]); + xfer += iprot->readString(this->part_vals[_i398]); } iprot->readListEnd(); } @@ -7900,14 +8442,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size370; - ::apache::thrift::protocol::TType _etype373; - iprot->readListBegin(_etype373, _size370); - this->group_names.resize(_size370); - uint32_t _i374; - for (_i374 = 0; _i374 < _size370; ++_i374) + uint32_t _size399; + ::apache::thrift::protocol::TType _etype402; + iprot->readListBegin(_etype402, _size399); + this->group_names.resize(_size399); + uint32_t _i403; + for (_i403 = 0; _i403 < _size399; ++_i403) { - xfer += iprot->readString(this->group_names[_i374]); + xfer += iprot->readString(this->group_names[_i403]); } iprot->readListEnd(); } @@ -7940,10 +8482,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter375; - for (_iter375 = 
this->part_vals.begin(); _iter375 != this->part_vals.end(); ++_iter375) + std::vector ::const_iterator _iter404; + for (_iter404 = this->part_vals.begin(); _iter404 != this->part_vals.end(); ++_iter404) { - xfer += oprot->writeString((*_iter375)); + xfer += oprot->writeString((*_iter404)); } xfer += oprot->writeListEnd(); } @@ -7957,10 +8499,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter376; - for (_iter376 = this->group_names.begin(); _iter376 != this->group_names.end(); ++_iter376) + std::vector ::const_iterator _iter405; + for (_iter405 = this->group_names.begin(); _iter405 != this->group_names.end(); ++_iter405) { - xfer += oprot->writeString((*_iter376)); + xfer += oprot->writeString((*_iter405)); } xfer += oprot->writeListEnd(); } @@ -7982,10 +8524,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter377; - for (_iter377 = (*(this->part_vals)).begin(); _iter377 != (*(this->part_vals)).end(); ++_iter377) + std::vector ::const_iterator _iter406; + for (_iter406 = (*(this->part_vals)).begin(); _iter406 != (*(this->part_vals)).end(); ++_iter406) { - xfer += oprot->writeString((*_iter377)); + xfer += oprot->writeString((*_iter406)); } xfer += oprot->writeListEnd(); } @@ -7999,10 +8541,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter378; - for (_iter378 = (*(this->group_names)).begin(); _iter378 != (*(this->group_names)).end(); ++_iter378) + std::vector ::const_iterator _iter407; + for (_iter407 = 
(*(this->group_names)).begin(); _iter407 != (*(this->group_names)).end(); ++_iter407) { - xfer += oprot->writeString((*_iter378)); + xfer += oprot->writeString((*_iter407)); } xfer += oprot->writeListEnd(); } @@ -8036,14 +8578,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size379; - ::apache::thrift::protocol::TType _etype382; - iprot->readListBegin(_etype382, _size379); - this->success.resize(_size379); - uint32_t _i383; - for (_i383 = 0; _i383 < _size379; ++_i383) + uint32_t _size408; + ::apache::thrift::protocol::TType _etype411; + iprot->readListBegin(_etype411, _size408); + this->success.resize(_size408); + uint32_t _i412; + for (_i412 = 0; _i412 < _size408; ++_i412) { - xfer += this->success[_i383].read(iprot); + xfer += this->success[_i412].read(iprot); } iprot->readListEnd(); } @@ -8090,10 +8632,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter384; - for (_iter384 = this->success.begin(); _iter384 != this->success.end(); ++_iter384) + std::vector ::const_iterator _iter413; + for (_iter413 = this->success.begin(); _iter413 != this->success.end(); ++_iter413) { - xfer += (*_iter384).write(oprot); + xfer += (*_iter413).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8136,14 +8678,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size385; - ::apache::thrift::protocol::TType _etype388; - iprot->readListBegin(_etype388, _size385); - (*(this->success)).resize(_size385); - uint32_t _i389; - for (_i389 = 0; _i389 < _size385; ++_i389) + uint32_t _size414; + ::apache::thrift::protocol::TType _etype417; + iprot->readListBegin(_etype417, _size414); + (*(this->success)).resize(_size414); + uint32_t _i418; + for (_i418 = 0; _i418 < _size414; ++_i418) { - xfer += 
(*(this->success))[_i389].read(iprot); + xfer += (*(this->success))[_i418].read(iprot); } iprot->readListEnd(); } @@ -8220,14 +8762,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size390; - ::apache::thrift::protocol::TType _etype393; - iprot->readListBegin(_etype393, _size390); - this->part_vals.resize(_size390); - uint32_t _i394; - for (_i394 = 0; _i394 < _size390; ++_i394) + uint32_t _size419; + ::apache::thrift::protocol::TType _etype422; + iprot->readListBegin(_etype422, _size419); + this->part_vals.resize(_size419); + uint32_t _i423; + for (_i423 = 0; _i423 < _size419; ++_i423) { - xfer += iprot->readString(this->part_vals[_i394]); + xfer += iprot->readString(this->part_vals[_i423]); } iprot->readListEnd(); } @@ -8268,10 +8810,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter395; - for (_iter395 = this->part_vals.begin(); _iter395 != this->part_vals.end(); ++_iter395) + std::vector ::const_iterator _iter424; + for (_iter424 = this->part_vals.begin(); _iter424 != this->part_vals.end(); ++_iter424) { - xfer += oprot->writeString((*_iter395)); + xfer += oprot->writeString((*_iter424)); } xfer += oprot->writeListEnd(); } @@ -8296,10 +8838,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter396; - for (_iter396 = (*(this->part_vals)).begin(); _iter396 != (*(this->part_vals)).end(); ++_iter396) + std::vector ::const_iterator _iter425; + for (_iter425 = (*(this->part_vals)).begin(); _iter425 != (*(this->part_vals)).end(); ++_iter425) { - xfer += oprot->writeString((*_iter396)); + xfer += oprot->writeString((*_iter425)); } xfer += oprot->writeListEnd(); 
} @@ -8336,14 +8878,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size397; - ::apache::thrift::protocol::TType _etype400; - iprot->readListBegin(_etype400, _size397); - this->success.resize(_size397); - uint32_t _i401; - for (_i401 = 0; _i401 < _size397; ++_i401) + uint32_t _size426; + ::apache::thrift::protocol::TType _etype429; + iprot->readListBegin(_etype429, _size426); + this->success.resize(_size426); + uint32_t _i430; + for (_i430 = 0; _i430 < _size426; ++_i430) { - xfer += iprot->readString(this->success[_i401]); + xfer += iprot->readString(this->success[_i430]); } iprot->readListEnd(); } @@ -8390,10 +8932,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter402; - for (_iter402 = this->success.begin(); _iter402 != this->success.end(); ++_iter402) + std::vector ::const_iterator _iter431; + for (_iter431 = this->success.begin(); _iter431 != this->success.end(); ++_iter431) { - xfer += oprot->writeString((*_iter402)); + xfer += oprot->writeString((*_iter431)); } xfer += oprot->writeListEnd(); } @@ -8436,14 +8978,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size403; - ::apache::thrift::protocol::TType _etype406; - iprot->readListBegin(_etype406, _size403); - (*(this->success)).resize(_size403); - uint32_t _i407; - for (_i407 = 0; _i407 < _size403; ++_i407) + uint32_t _size432; + ::apache::thrift::protocol::TType _etype435; + iprot->readListBegin(_etype435, _size432); + (*(this->success)).resize(_size432); + uint32_t _i436; + for (_i436 = 0; _i436 < _size432; ++_i436) { - xfer += iprot->readString((*(this->success))[_i407]); + xfer += iprot->readString((*(this->success))[_i436]); } iprot->readListEnd(); } @@ -8608,14 +9150,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->success.clear(); - uint32_t _size408; - ::apache::thrift::protocol::TType _etype411; - iprot->readListBegin(_etype411, _size408); - this->success.resize(_size408); - uint32_t _i412; - for (_i412 = 0; _i412 < _size408; ++_i412) + uint32_t _size437; + ::apache::thrift::protocol::TType _etype440; + iprot->readListBegin(_etype440, _size437); + this->success.resize(_size437); + uint32_t _i441; + for (_i441 = 0; _i441 < _size437; ++_i441) { - xfer += this->success[_i412].read(iprot); + xfer += this->success[_i441].read(iprot); } iprot->readListEnd(); } @@ -8662,10 +9204,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter413; - for (_iter413 = this->success.begin(); _iter413 != this->success.end(); ++_iter413) + std::vector ::const_iterator _iter442; + for (_iter442 = this->success.begin(); _iter442 != this->success.end(); ++_iter442) { - xfer += (*_iter413).write(oprot); + xfer += (*_iter442).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8708,14 +9250,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size414; - ::apache::thrift::protocol::TType _etype417; - iprot->readListBegin(_etype417, _size414); - (*(this->success)).resize(_size414); - uint32_t _i418; - for (_i418 = 0; _i418 < _size414; ++_i418) + uint32_t _size443; + ::apache::thrift::protocol::TType _etype446; + iprot->readListBegin(_etype446, _size443); + (*(this->success)).resize(_size443); + uint32_t _i447; + for (_i447 = 0; _i447 < _size443; ++_i447) { - xfer += (*(this->success))[_i418].read(iprot); + xfer += (*(this->success))[_i447].read(iprot); } iprot->readListEnd(); } @@ -8792,14 +9334,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size419; - ::apache::thrift::protocol::TType _etype422; - iprot->readListBegin(_etype422, 
_size419); - this->names.resize(_size419); - uint32_t _i423; - for (_i423 = 0; _i423 < _size419; ++_i423) + uint32_t _size448; + ::apache::thrift::protocol::TType _etype451; + iprot->readListBegin(_etype451, _size448); + this->names.resize(_size448); + uint32_t _i452; + for (_i452 = 0; _i452 < _size448; ++_i452) { - xfer += iprot->readString(this->names[_i423]); + xfer += iprot->readString(this->names[_i452]); } iprot->readListEnd(); } @@ -8832,10 +9374,10 @@ xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter424; - for (_iter424 = this->names.begin(); _iter424 != this->names.end(); ++_iter424) + std::vector ::const_iterator _iter453; + for (_iter453 = this->names.begin(); _iter453 != this->names.end(); ++_iter453) { - xfer += oprot->writeString((*_iter424)); + xfer += oprot->writeString((*_iter453)); } xfer += oprot->writeListEnd(); } @@ -8857,10 +9399,10 @@ xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter425; - for (_iter425 = (*(this->names)).begin(); _iter425 != (*(this->names)).end(); ++_iter425) + std::vector ::const_iterator _iter454; + for (_iter454 = (*(this->names)).begin(); _iter454 != (*(this->names)).end(); ++_iter454) { - xfer += oprot->writeString((*_iter425)); + xfer += oprot->writeString((*_iter454)); } xfer += oprot->writeListEnd(); } @@ -8894,14 +9436,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size426; - ::apache::thrift::protocol::TType _etype429; - iprot->readListBegin(_etype429, _size426); - this->success.resize(_size426); - uint32_t _i430; - for (_i430 = 0; _i430 < _size426; ++_i430) + uint32_t _size455; + ::apache::thrift::protocol::TType _etype458; + 
iprot->readListBegin(_etype458, _size455); + this->success.resize(_size455); + uint32_t _i459; + for (_i459 = 0; _i459 < _size455; ++_i459) { - xfer += this->success[_i430].read(iprot); + xfer += this->success[_i459].read(iprot); } iprot->readListEnd(); } @@ -8948,10 +9490,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter431; - for (_iter431 = this->success.begin(); _iter431 != this->success.end(); ++_iter431) + std::vector ::const_iterator _iter460; + for (_iter460 = this->success.begin(); _iter460 != this->success.end(); ++_iter460) { - xfer += (*_iter431).write(oprot); + xfer += (*_iter460).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8994,14 +9536,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size432; - ::apache::thrift::protocol::TType _etype435; - iprot->readListBegin(_etype435, _size432); - (*(this->success)).resize(_size432); - uint32_t _i436; - for (_i436 = 0; _i436 < _size432; ++_i436) + uint32_t _size461; + ::apache::thrift::protocol::TType _etype464; + iprot->readListBegin(_etype464, _size461); + (*(this->success)).resize(_size461); + uint32_t _i465; + for (_i465 = 0; _i465 < _size461; ++_i465) { - xfer += (*(this->success))[_i436].read(iprot); + xfer += (*(this->success))[_i465].read(iprot); } iprot->readListEnd(); } @@ -9284,14 +9826,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size437; - ::apache::thrift::protocol::TType _etype440; - iprot->readListBegin(_etype440, _size437); - this->part_vals.resize(_size437); - uint32_t _i441; - for (_i441 = 0; _i441 < _size437; ++_i441) + uint32_t _size466; + ::apache::thrift::protocol::TType _etype469; + iprot->readListBegin(_etype469, _size466); + this->part_vals.resize(_size466); + uint32_t _i470; + for (_i470 = 0; _i470 < 
_size466; ++_i470) { - xfer += iprot->readString(this->part_vals[_i441]); + xfer += iprot->readString(this->part_vals[_i470]); } iprot->readListEnd(); } @@ -9332,10 +9874,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter442; - for (_iter442 = this->part_vals.begin(); _iter442 != this->part_vals.end(); ++_iter442) + std::vector ::const_iterator _iter471; + for (_iter471 = this->part_vals.begin(); _iter471 != this->part_vals.end(); ++_iter471) { - xfer += oprot->writeString((*_iter442)); + xfer += oprot->writeString((*_iter471)); } xfer += oprot->writeListEnd(); } @@ -9360,10 +9902,10 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter443; - for (_iter443 = (*(this->part_vals)).begin(); _iter443 != (*(this->part_vals)).end(); ++_iter443) + std::vector ::const_iterator _iter472; + for (_iter472 = (*(this->part_vals)).begin(); _iter472 != (*(this->part_vals)).end(); ++_iter472) { - xfer += oprot->writeString((*_iter443)); + xfer += oprot->writeString((*_iter472)); } xfer += oprot->writeListEnd(); } @@ -9770,14 +10312,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size444; - ::apache::thrift::protocol::TType _etype447; - iprot->readListBegin(_etype447, _size444); - this->success.resize(_size444); - uint32_t _i448; - for (_i448 = 0; _i448 < _size444; ++_i448) + uint32_t _size473; + ::apache::thrift::protocol::TType _etype476; + iprot->readListBegin(_etype476, _size473); + this->success.resize(_size473); + uint32_t _i477; + for (_i477 = 0; _i477 < _size473; ++_i477) { - xfer += iprot->readString(this->success[_i448]); + xfer += 
iprot->readString(this->success[_i477]); } iprot->readListEnd(); } @@ -9816,10 +10358,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter449; - for (_iter449 = this->success.begin(); _iter449 != this->success.end(); ++_iter449) + std::vector ::const_iterator _iter478; + for (_iter478 = this->success.begin(); _iter478 != this->success.end(); ++_iter478) { - xfer += oprot->writeString((*_iter449)); + xfer += oprot->writeString((*_iter478)); } xfer += oprot->writeListEnd(); } @@ -9858,14 +10400,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size450; - ::apache::thrift::protocol::TType _etype453; - iprot->readListBegin(_etype453, _size450); - (*(this->success)).resize(_size450); - uint32_t _i454; - for (_i454 = 0; _i454 < _size450; ++_i454) + uint32_t _size479; + ::apache::thrift::protocol::TType _etype482; + iprot->readListBegin(_etype482, _size479); + (*(this->success)).resize(_size479); + uint32_t _i483; + for (_i483 = 0; _i483 < _size479; ++_i483) { - xfer += iprot->readString((*(this->success))[_i454]); + xfer += iprot->readString((*(this->success))[_i483]); } iprot->readListEnd(); } @@ -9980,17 +10522,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size455; - ::apache::thrift::protocol::TType _ktype456; - ::apache::thrift::protocol::TType _vtype457; - iprot->readMapBegin(_ktype456, _vtype457, _size455); - uint32_t _i459; - for (_i459 = 0; _i459 < _size455; ++_i459) + uint32_t _size484; + ::apache::thrift::protocol::TType _ktype485; + ::apache::thrift::protocol::TType _vtype486; + iprot->readMapBegin(_ktype485, _vtype486, _size484); + uint32_t _i488; + for (_i488 = 0; _i488 < _size484; ++_i488) { - std::string _key460; - xfer += iprot->readString(_key460); - std::string& _val461 = 
this->success[_key460]; - xfer += iprot->readString(_val461); + std::string _key489; + xfer += iprot->readString(_key489); + std::string& _val490 = this->success[_key489]; + xfer += iprot->readString(_val490); } iprot->readMapEnd(); } @@ -10029,11 +10571,11 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter462; - for (_iter462 = this->success.begin(); _iter462 != this->success.end(); ++_iter462) + std::map ::const_iterator _iter491; + for (_iter491 = this->success.begin(); _iter491 != this->success.end(); ++_iter491) { - xfer += oprot->writeString(_iter462->first); - xfer += oprot->writeString(_iter462->second); + xfer += oprot->writeString(_iter491->first); + xfer += oprot->writeString(_iter491->second); } xfer += oprot->writeMapEnd(); } @@ -10072,17 +10614,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size463; - ::apache::thrift::protocol::TType _ktype464; - ::apache::thrift::protocol::TType _vtype465; - iprot->readMapBegin(_ktype464, _vtype465, _size463); - uint32_t _i467; - for (_i467 = 0; _i467 < _size463; ++_i467) + uint32_t _size492; + ::apache::thrift::protocol::TType _ktype493; + ::apache::thrift::protocol::TType _vtype494; + iprot->readMapBegin(_ktype493, _vtype494, _size492); + uint32_t _i496; + for (_i496 = 0; _i496 < _size492; ++_i496) { - std::string _key468; - xfer += iprot->readString(_key468); - std::string& _val469 = (*(this->success))[_key468]; - xfer += iprot->readString(_val469); + std::string _key497; + xfer += iprot->readString(_key497); + std::string& _val498 = (*(this->success))[_key497]; + xfer += iprot->readString(_val498); } iprot->readMapEnd(); } @@ -10151,17 +10693,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size470; - 
::apache::thrift::protocol::TType _ktype471; - ::apache::thrift::protocol::TType _vtype472; - iprot->readMapBegin(_ktype471, _vtype472, _size470); - uint32_t _i474; - for (_i474 = 0; _i474 < _size470; ++_i474) + uint32_t _size499; + ::apache::thrift::protocol::TType _ktype500; + ::apache::thrift::protocol::TType _vtype501; + iprot->readMapBegin(_ktype500, _vtype501, _size499); + uint32_t _i503; + for (_i503 = 0; _i503 < _size499; ++_i503) { - std::string _key475; - xfer += iprot->readString(_key475); - std::string& _val476 = this->part_vals[_key475]; - xfer += iprot->readString(_val476); + std::string _key504; + xfer += iprot->readString(_key504); + std::string& _val505 = this->part_vals[_key504]; + xfer += iprot->readString(_val505); } iprot->readMapEnd(); } @@ -10172,9 +10714,9 @@ break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast477; - xfer += iprot->readI32(ecast477); - this->eventType = (PartitionEventType::type)ecast477; + int32_t ecast506; + xfer += iprot->readI32(ecast506); + this->eventType = (PartitionEventType::type)ecast506; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -10204,11 +10746,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter478; - for (_iter478 = this->part_vals.begin(); _iter478 != this->part_vals.end(); ++_iter478) + std::map ::const_iterator _iter507; + for (_iter507 = this->part_vals.begin(); _iter507 != this->part_vals.end(); ++_iter507) { - xfer += oprot->writeString(_iter478->first); - xfer += oprot->writeString(_iter478->second); + xfer += oprot->writeString(_iter507->first); + xfer += oprot->writeString(_iter507->second); } xfer += oprot->writeMapEnd(); } @@ -10233,11 +10775,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { 
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter479; - for (_iter479 = (*(this->part_vals)).begin(); _iter479 != (*(this->part_vals)).end(); ++_iter479) + std::map ::const_iterator _iter508; + for (_iter508 = (*(this->part_vals)).begin(); _iter508 != (*(this->part_vals)).end(); ++_iter508) { - xfer += oprot->writeString(_iter479->first); - xfer += oprot->writeString(_iter479->second); + xfer += oprot->writeString(_iter508->first); + xfer += oprot->writeString(_iter508->second); } xfer += oprot->writeMapEnd(); } @@ -10486,17 +11028,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size480; - ::apache::thrift::protocol::TType _ktype481; - ::apache::thrift::protocol::TType _vtype482; - iprot->readMapBegin(_ktype481, _vtype482, _size480); - uint32_t _i484; - for (_i484 = 0; _i484 < _size480; ++_i484) + uint32_t _size509; + ::apache::thrift::protocol::TType _ktype510; + ::apache::thrift::protocol::TType _vtype511; + iprot->readMapBegin(_ktype510, _vtype511, _size509); + uint32_t _i513; + for (_i513 = 0; _i513 < _size509; ++_i513) { - std::string _key485; - xfer += iprot->readString(_key485); - std::string& _val486 = this->part_vals[_key485]; - xfer += iprot->readString(_val486); + std::string _key514; + xfer += iprot->readString(_key514); + std::string& _val515 = this->part_vals[_key514]; + xfer += iprot->readString(_val515); } iprot->readMapEnd(); } @@ -10507,9 +11049,9 @@ break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast487; - xfer += iprot->readI32(ecast487); - this->eventType = (PartitionEventType::type)ecast487; + int32_t ecast516; + xfer += iprot->readI32(ecast516); + this->eventType = (PartitionEventType::type)ecast516; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -10539,11 +11081,11 @@ xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter488; - for (_iter488 = this->part_vals.begin(); _iter488 != this->part_vals.end(); ++_iter488) + std::map ::const_iterator _iter517; + for (_iter517 = this->part_vals.begin(); _iter517 != this->part_vals.end(); ++_iter517) { - xfer += oprot->writeString(_iter488->first); - xfer += oprot->writeString(_iter488->second); + xfer += oprot->writeString(_iter517->first); + xfer += oprot->writeString(_iter517->second); } xfer += oprot->writeMapEnd(); } @@ -10568,11 +11110,11 @@ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter489; - for (_iter489 = (*(this->part_vals)).begin(); _iter489 != (*(this->part_vals)).end(); ++_iter489) + std::map ::const_iterator _iter518; + for (_iter518 = (*(this->part_vals)).begin(); _iter518 != (*(this->part_vals)).end(); ++_iter518) { - xfer += oprot->writeString(_iter489->first); - xfer += oprot->writeString(_iter489->second); + xfer += oprot->writeString(_iter518->first); + xfer += oprot->writeString(_iter518->second); } xfer += oprot->writeMapEnd(); } @@ -11833,14 +12375,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size490; - ::apache::thrift::protocol::TType _etype493; - iprot->readListBegin(_etype493, _size490); - this->success.resize(_size490); - uint32_t _i494; - for (_i494 = 0; _i494 < _size490; ++_i494) + uint32_t _size519; + ::apache::thrift::protocol::TType _etype522; + iprot->readListBegin(_etype522, _size519); + this->success.resize(_size519); + uint32_t _i523; + for (_i523 = 0; _i523 < _size519; ++_i523) { - xfer += this->success[_i494].read(iprot); + 
xfer += this->success[_i523].read(iprot); } iprot->readListEnd(); } @@ -11887,10 +12429,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter495; - for (_iter495 = this->success.begin(); _iter495 != this->success.end(); ++_iter495) + std::vector ::const_iterator _iter524; + for (_iter524 = this->success.begin(); _iter524 != this->success.end(); ++_iter524) { - xfer += (*_iter495).write(oprot); + xfer += (*_iter524).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11933,14 +12475,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size496; - ::apache::thrift::protocol::TType _etype499; - iprot->readListBegin(_etype499, _size496); - (*(this->success)).resize(_size496); - uint32_t _i500; - for (_i500 = 0; _i500 < _size496; ++_i500) + uint32_t _size525; + ::apache::thrift::protocol::TType _etype528; + iprot->readListBegin(_etype528, _size525); + (*(this->success)).resize(_size525); + uint32_t _i529; + for (_i529 = 0; _i529 < _size525; ++_i529) { - xfer += (*(this->success))[_i500].read(iprot); + xfer += (*(this->success))[_i529].read(iprot); } iprot->readListEnd(); } @@ -12091,14 +12633,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size501; - ::apache::thrift::protocol::TType _etype504; - iprot->readListBegin(_etype504, _size501); - this->success.resize(_size501); - uint32_t _i505; - for (_i505 = 0; _i505 < _size501; ++_i505) + uint32_t _size530; + ::apache::thrift::protocol::TType _etype533; + iprot->readListBegin(_etype533, _size530); + this->success.resize(_size530); + uint32_t _i534; + for (_i534 = 0; _i534 < _size530; ++_i534) { - xfer += iprot->readString(this->success[_i505]); + xfer += iprot->readString(this->success[_i534]); } iprot->readListEnd(); } @@ -12137,10 +12679,10 @@ xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter506; - for (_iter506 = this->success.begin(); _iter506 != this->success.end(); ++_iter506) + std::vector ::const_iterator _iter535; + for (_iter535 = this->success.begin(); _iter535 != this->success.end(); ++_iter535) { - xfer += oprot->writeString((*_iter506)); + xfer += oprot->writeString((*_iter535)); } xfer += oprot->writeListEnd(); } @@ -12179,14 +12721,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size507; - ::apache::thrift::protocol::TType _etype510; - iprot->readListBegin(_etype510, _size507); - (*(this->success)).resize(_size507); - uint32_t _i511; - for (_i511 = 0; _i511 < _size507; ++_i511) + uint32_t _size536; + ::apache::thrift::protocol::TType _etype539; + iprot->readListBegin(_etype539, _size536); + (*(this->success)).resize(_size536); + uint32_t _i540; + for (_i540 = 0; _i540 < _size536; ++_i540) { - xfer += iprot->readString((*(this->success))[_i511]); + xfer += iprot->readString((*(this->success))[_i540]); } iprot->readListEnd(); } @@ -12215,6 +12757,1028 @@ return xfer; } +uint32_t ThriftHiveMetastore_create_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->cluster.read(iprot); + this->__isset.cluster = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += 
iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_cluster_args"); + xfer += oprot->writeFieldBegin("cluster", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->cluster.write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_create_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->cluster)).write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_create_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + 
} + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_cluster_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_create_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += 
iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cluster_name); + this->__isset.cluster_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_cluster_args"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->cluster_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->cluster_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t 
ThriftHiveMetastore_drop_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_cluster_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t 
ThriftHiveMetastore_drop_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cluster_name); + this->__isset.cluster_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
ThriftHiveMetastore_get_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_cluster_args"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->cluster_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->cluster_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } 
+ xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_cluster_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_get_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += 
iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_clusters_args"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_clusters_pargs"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size541; + ::apache::thrift::protocol::TType _etype544; + 
iprot->readListBegin(_etype544, _size541); + this->success.resize(_size541); + uint32_t _i545; + for (_i545 = 0; _i545 < _size541; ++_i545) + { + xfer += this->success[_i545].read(iprot); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_clusters_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter546; + for (_iter546 = this->success.begin(); _iter546 != this->success.end(); ++_iter546) + { + xfer += (*_iter546).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_list_clusters_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == 
::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size547; + ::apache::thrift::protocol::TType _etype550; + iprot->readListBegin(_etype550, _size547); + (*(this->success)).resize(_size547); + uint32_t _i551; + for (_i551 = 0; _i551 < _size547; ++_i551) + { + xfer += (*(this->success))[_i551].read(iprot); + } + iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->cluster_name); + this->__isset.cluster_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->cluster.read(iprot); + this->__isset.cluster = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + 
uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_cluster_args"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->cluster_name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("cluster", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->cluster.write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_cluster_pargs"); + xfer += oprot->writeFieldBegin("cluster_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->cluster_name))); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("cluster", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += (*(this->cluster)).write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + 
if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_cluster_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_cluster_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 
1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -12643,14 +14207,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size512; - ::apache::thrift::protocol::TType _etype515; - iprot->readListBegin(_etype515, _size512); - this->success.resize(_size512); - uint32_t _i516; - for (_i516 = 0; _i516 < _size512; ++_i516) + uint32_t _size552; + ::apache::thrift::protocol::TType _etype555; + iprot->readListBegin(_etype555, _size552); + this->success.resize(_size552); + uint32_t _i556; + for (_i556 = 0; _i556 < _size552; ++_i556) { - xfer += iprot->readString(this->success[_i516]); + xfer += iprot->readString(this->success[_i556]); } iprot->readListEnd(); } @@ -12689,10 +14253,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter517; - for (_iter517 = this->success.begin(); _iter517 != this->success.end(); ++_iter517) + std::vector ::const_iterator 
_iter557; + for (_iter557 = this->success.begin(); _iter557 != this->success.end(); ++_iter557) { - xfer += oprot->writeString((*_iter517)); + xfer += oprot->writeString((*_iter557)); } xfer += oprot->writeListEnd(); } @@ -12731,14 +14295,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size518; - ::apache::thrift::protocol::TType _etype521; - iprot->readListBegin(_etype521, _size518); - (*(this->success)).resize(_size518); - uint32_t _i522; - for (_i522 = 0; _i522 < _size518; ++_i522) + uint32_t _size558; + ::apache::thrift::protocol::TType _etype561; + iprot->readListBegin(_etype561, _size558); + (*(this->success)).resize(_size558); + uint32_t _i562; + for (_i562 = 0; _i562 < _size558; ++_i562) { - xfer += iprot->readString((*(this->success))[_i522]); + xfer += iprot->readString((*(this->success))[_i562]); } iprot->readListEnd(); } @@ -12805,9 +14369,9 @@ break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast523; - xfer += iprot->readI32(ecast523); - this->principal_type = (PrincipalType::type)ecast523; + int32_t ecast563; + xfer += iprot->readI32(ecast563); + this->principal_type = (PrincipalType::type)ecast563; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -12823,9 +14387,9 @@ break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast524; - xfer += iprot->readI32(ecast524); - this->grantorType = (PrincipalType::type)ecast524; + int32_t ecast564; + xfer += iprot->readI32(ecast564); + this->grantorType = (PrincipalType::type)ecast564; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -13057,9 +14621,9 @@ break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast525; - xfer += iprot->readI32(ecast525); - this->principal_type = (PrincipalType::type)ecast525; + int32_t ecast565; + xfer += iprot->readI32(ecast565); + this->principal_type = (PrincipalType::type)ecast565; 
this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -13257,9 +14821,9 @@ break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast526; - xfer += iprot->readI32(ecast526); - this->principal_type = (PrincipalType::type)ecast526; + int32_t ecast566; + xfer += iprot->readI32(ecast566); + this->principal_type = (PrincipalType::type)ecast566; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -13329,14 +14893,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size527; - ::apache::thrift::protocol::TType _etype530; - iprot->readListBegin(_etype530, _size527); - this->success.resize(_size527); - uint32_t _i531; - for (_i531 = 0; _i531 < _size527; ++_i531) + uint32_t _size567; + ::apache::thrift::protocol::TType _etype570; + iprot->readListBegin(_etype570, _size567); + this->success.resize(_size567); + uint32_t _i571; + for (_i571 = 0; _i571 < _size567; ++_i571) { - xfer += this->success[_i531].read(iprot); + xfer += this->success[_i571].read(iprot); } iprot->readListEnd(); } @@ -13375,10 +14939,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter532; - for (_iter532 = this->success.begin(); _iter532 != this->success.end(); ++_iter532) + std::vector ::const_iterator _iter572; + for (_iter572 = this->success.begin(); _iter572 != this->success.end(); ++_iter572) { - xfer += (*_iter532).write(oprot); + xfer += (*_iter572).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13417,14 +14981,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size533; - ::apache::thrift::protocol::TType _etype536; - iprot->readListBegin(_etype536, _size533); - (*(this->success)).resize(_size533); - uint32_t _i537; - for (_i537 = 0; _i537 < _size533; ++_i537) + 
uint32_t _size573; + ::apache::thrift::protocol::TType _etype576; + iprot->readListBegin(_etype576, _size573); + (*(this->success)).resize(_size573); + uint32_t _i577; + for (_i577 = 0; _i577 < _size573; ++_i577) { - xfer += (*(this->success))[_i537].read(iprot); + xfer += (*(this->success))[_i577].read(iprot); } iprot->readListEnd(); } @@ -13493,14 +15057,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size538; - ::apache::thrift::protocol::TType _etype541; - iprot->readListBegin(_etype541, _size538); - this->group_names.resize(_size538); - uint32_t _i542; - for (_i542 = 0; _i542 < _size538; ++_i542) + uint32_t _size578; + ::apache::thrift::protocol::TType _etype581; + iprot->readListBegin(_etype581, _size578); + this->group_names.resize(_size578); + uint32_t _i582; + for (_i582 = 0; _i582 < _size578; ++_i582) { - xfer += iprot->readString(this->group_names[_i542]); + xfer += iprot->readString(this->group_names[_i582]); } iprot->readListEnd(); } @@ -13533,10 +15097,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter543; - for (_iter543 = this->group_names.begin(); _iter543 != this->group_names.end(); ++_iter543) + std::vector ::const_iterator _iter583; + for (_iter583 = this->group_names.begin(); _iter583 != this->group_names.end(); ++_iter583) { - xfer += oprot->writeString((*_iter543)); + xfer += oprot->writeString((*_iter583)); } xfer += oprot->writeListEnd(); } @@ -13558,10 +15122,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter544; - for (_iter544 = (*(this->group_names)).begin(); _iter544 != (*(this->group_names)).end(); ++_iter544) + 
std::vector ::const_iterator _iter584; + for (_iter584 = (*(this->group_names)).begin(); _iter584 != (*(this->group_names)).end(); ++_iter584) { - xfer += oprot->writeString((*_iter544)); + xfer += oprot->writeString((*_iter584)); } xfer += oprot->writeListEnd(); } @@ -13717,9 +15281,9 @@ break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast545; - xfer += iprot->readI32(ecast545); - this->principal_type = (PrincipalType::type)ecast545; + int32_t ecast585; + xfer += iprot->readI32(ecast585); + this->principal_type = (PrincipalType::type)ecast585; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -13803,14 +15367,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size546; - ::apache::thrift::protocol::TType _etype549; - iprot->readListBegin(_etype549, _size546); - this->success.resize(_size546); - uint32_t _i550; - for (_i550 = 0; _i550 < _size546; ++_i550) + uint32_t _size586; + ::apache::thrift::protocol::TType _etype589; + iprot->readListBegin(_etype589, _size586); + this->success.resize(_size586); + uint32_t _i590; + for (_i590 = 0; _i590 < _size586; ++_i590) { - xfer += this->success[_i550].read(iprot); + xfer += this->success[_i590].read(iprot); } iprot->readListEnd(); } @@ -13849,10 +15413,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter551; - for (_iter551 = this->success.begin(); _iter551 != this->success.end(); ++_iter551) + std::vector ::const_iterator _iter591; + for (_iter591 = this->success.begin(); _iter591 != this->success.end(); ++_iter591) { - xfer += (*_iter551).write(oprot); + xfer += (*_iter591).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13891,14 +15455,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size552; - 
::apache::thrift::protocol::TType _etype555; - iprot->readListBegin(_etype555, _size552); - (*(this->success)).resize(_size552); - uint32_t _i556; - for (_i556 = 0; _i556 < _size552; ++_i556) + uint32_t _size592; + ::apache::thrift::protocol::TType _etype595; + iprot->readListBegin(_etype595, _size592); + (*(this->success)).resize(_size592); + uint32_t _i596; + for (_i596 = 0; _i596 < _size592; ++_i596) { - xfer += (*(this->success))[_i556].read(iprot); + xfer += (*(this->success))[_i596].read(iprot); } iprot->readListEnd(); } @@ -14315,14 +15879,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size557; - ::apache::thrift::protocol::TType _etype560; - iprot->readListBegin(_etype560, _size557); - this->group_names.resize(_size557); - uint32_t _i561; - for (_i561 = 0; _i561 < _size557; ++_i561) + uint32_t _size597; + ::apache::thrift::protocol::TType _etype600; + iprot->readListBegin(_etype600, _size597); + this->group_names.resize(_size597); + uint32_t _i601; + for (_i601 = 0; _i601 < _size597; ++_i601) { - xfer += iprot->readString(this->group_names[_i561]); + xfer += iprot->readString(this->group_names[_i601]); } iprot->readListEnd(); } @@ -14352,10 +15916,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter562; - for (_iter562 = this->group_names.begin(); _iter562 != this->group_names.end(); ++_iter562) + std::vector ::const_iterator _iter602; + for (_iter602 = this->group_names.begin(); _iter602 != this->group_names.end(); ++_iter602) { - xfer += oprot->writeString((*_iter562)); + xfer += oprot->writeString((*_iter602)); } xfer += oprot->writeListEnd(); } @@ -14374,10 +15938,10 @@ xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter563; - for (_iter563 = (*(this->group_names)).begin(); _iter563 != (*(this->group_names)).end(); ++_iter563) + std::vector ::const_iterator _iter603; + for (_iter603 = (*(this->group_names)).begin(); _iter603 != (*(this->group_names)).end(); ++_iter603) { - xfer += oprot->writeString((*_iter563)); + xfer += oprot->writeString((*_iter603)); } xfer += oprot->writeListEnd(); } @@ -14411,14 +15975,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size564; - ::apache::thrift::protocol::TType _etype567; - iprot->readListBegin(_etype567, _size564); - this->success.resize(_size564); - uint32_t _i568; - for (_i568 = 0; _i568 < _size564; ++_i568) + uint32_t _size604; + ::apache::thrift::protocol::TType _etype607; + iprot->readListBegin(_etype607, _size604); + this->success.resize(_size604); + uint32_t _i608; + for (_i608 = 0; _i608 < _size604; ++_i608) { - xfer += iprot->readString(this->success[_i568]); + xfer += iprot->readString(this->success[_i608]); } iprot->readListEnd(); } @@ -14457,10 +16021,10 @@ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter569; - for (_iter569 = this->success.begin(); _iter569 != this->success.end(); ++_iter569) + std::vector ::const_iterator _iter609; + for (_iter609 = this->success.begin(); _iter609 != this->success.end(); ++_iter609) { - xfer += oprot->writeString((*_iter569)); + xfer += oprot->writeString((*_iter609)); } xfer += oprot->writeListEnd(); } @@ -14499,14 +16063,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size570; - ::apache::thrift::protocol::TType _etype573; - iprot->readListBegin(_etype573, _size570); - 
(*(this->success)).resize(_size570); - uint32_t _i574; - for (_i574 = 0; _i574 < _size570; ++_i574) + uint32_t _size610; + ::apache::thrift::protocol::TType _etype613; + iprot->readListBegin(_etype613, _size610); + (*(this->success)).resize(_size610); + uint32_t _i614; + for (_i614 = 0; _i614 < _size610; ++_i614) { - xfer += iprot->readString((*(this->success))[_i574]); + xfer += iprot->readString((*(this->success))[_i614]); } iprot->readListEnd(); } @@ -15952,6 +17516,71 @@ return; } +void ThriftHiveMetastoreClient::drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData) +{ + send_drop_table_on_cluster(cluster_name, dbname, name, deleteData); + recv_drop_table_on_cluster(); +} + +void ThriftHiveMetastoreClient::send_drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_table_on_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_table_on_cluster_pargs args; + args.cluster_name = &cluster_name; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_table_on_cluster() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_table_on_cluster") != 0) 
{ + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_table_on_cluster_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + void ThriftHiveMetastoreClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { send_get_tables(db_name, pattern); @@ -16677,6 +18306,77 @@ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); } +bool ThriftHiveMetastoreClient::drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +{ + send_drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition_on_cluster(); +} + +void ThriftHiveMetastoreClient::send_drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_partition_on_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_partition_on_cluster_pargs args; + args.cluster_name = &cluster_name; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +bool ThriftHiveMetastoreClient::recv_drop_partition_on_cluster() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, 
mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_partition_on_cluster") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + bool _return; + ThriftHiveMetastore_drop_partition_on_cluster_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + return _return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_on_cluster failed: unknown result"); +} + bool ThriftHiveMetastoreClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); @@ -18329,6 +20029,320 @@ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); } +void ThriftHiveMetastoreClient::create_cluster(const Cluster& cluster) +{ + send_create_cluster(cluster); + recv_create_cluster(); +} + +void ThriftHiveMetastoreClient::send_create_cluster(const Cluster& cluster) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_cluster_pargs args; + args.cluster = &cluster; + args.write(oprot_); + + 
oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_cluster() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_cluster") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_cluster_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::drop_cluster(const std::string& cluster_name) +{ + send_drop_cluster(cluster_name); + recv_drop_cluster(); +} + +void ThriftHiveMetastoreClient::send_drop_cluster(const std::string& cluster_name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_cluster_pargs args; + args.cluster_name = &cluster_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_cluster() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + 
::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_cluster") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_cluster_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::get_cluster(Cluster& _return, const std::string& cluster_name) +{ + send_get_cluster(cluster_name); + recv_get_cluster(_return); +} + +void ThriftHiveMetastoreClient::send_get_cluster(const std::string& cluster_name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_cluster_pargs args; + args.cluster_name = &cluster_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_cluster(Cluster& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if 
(fname.compare("get_cluster") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_cluster_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_cluster failed: unknown result"); +} + +void ThriftHiveMetastoreClient::list_clusters(std::vector & _return) +{ + send_list_clusters(); + recv_list_clusters(_return); +} + +void ThriftHiveMetastoreClient::send_list_clusters() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("list_clusters", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_list_clusters_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_list_clusters(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("list_clusters") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_list_clusters_presult result; + result.success = 
&_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_clusters failed: unknown result"); +} + +void ThriftHiveMetastoreClient::alter_cluster(const std::string& cluster_name, const Cluster& cluster) +{ + send_alter_cluster(cluster_name, cluster); + recv_alter_cluster(); +} + +void ThriftHiveMetastoreClient::send_alter_cluster(const std::string& cluster_name, const Cluster& cluster) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_cluster", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_cluster_pargs args; + args.cluster_name = &cluster_name; + args.cluster = &cluster; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_alter_cluster() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_cluster") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_alter_cluster_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if 
(result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + return; +} + bool ThriftHiveMetastoreClient::create_role(const Role& role) { send_create_role(role); @@ -20074,6 +22088,68 @@ } } +void ThriftHiveMetastoreProcessor::process_drop_table_on_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.drop_table_on_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_table_on_cluster"); + + if (eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_table_on_cluster"); + } + + ThriftHiveMetastore_drop_table_on_cluster_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_table_on_cluster", bytes); + } + + ThriftHiveMetastore_drop_table_on_cluster_result result; + try { + iface_->drop_table_on_cluster(args.cluster_name, args.dbname, args.name, args.deleteData); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidOperationException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_table_on_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_table_on_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + 
oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_table_on_cluster"); + } + + oprot->writeMessageBegin("drop_table_on_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_table_on_cluster", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -20745,6 +22821,69 @@ } } +void ThriftHiveMetastoreProcessor::process_drop_partition_on_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.drop_partition_on_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_partition_on_cluster"); + + if (eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_partition_on_cluster"); + } + + ThriftHiveMetastore_drop_partition_on_cluster_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_partition_on_cluster", bytes); + } + + ThriftHiveMetastore_drop_partition_on_cluster_result result; + try { + result.success = iface_->drop_partition_on_cluster(args.cluster_name, args.db_name, args.tbl_name, args.part_vals, args.deleteData); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch 
(MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (InvalidOperationException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_partition_on_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_partition_on_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_partition_on_cluster"); + } + + oprot->writeMessageBegin("drop_partition_on_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_partition_on_cluster", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_drop_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -22253,6 +24392,312 @@ } } +void ThriftHiveMetastoreProcessor::process_create_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.create_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.create_cluster"); + + if (eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_cluster"); + } + + ThriftHiveMetastore_create_cluster_args args; + args.read(iprot); + 
iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_cluster", bytes); + } + + ThriftHiveMetastore_create_cluster_result result; + try { + iface_->create_cluster(args.cluster); + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_cluster"); + } + + oprot->writeMessageBegin("create_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_cluster", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.drop_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_cluster"); + + if (eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_cluster"); + 
} + + ThriftHiveMetastore_drop_cluster_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_cluster", bytes); + } + + ThriftHiveMetastore_drop_cluster_result result; + try { + iface_->drop_cluster(args.cluster_name); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_cluster"); + } + + oprot->writeMessageBegin("drop_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_cluster", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.get_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.get_cluster"); + + if (eventHandler_.get() != NULL) { + 
eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_cluster"); + } + + ThriftHiveMetastore_get_cluster_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_cluster", bytes); + } + + ThriftHiveMetastore_get_cluster_result result; + try { + iface_->get_cluster(result.success, args.cluster_name); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_cluster"); + } + + oprot->writeMessageBegin("get_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_cluster", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_list_clusters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.list_clusters", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.list_clusters"); + + if 
(eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, "ThriftHiveMetastore.list_clusters"); + } + + ThriftHiveMetastore_list_clusters_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.list_clusters", bytes); + } + + ThriftHiveMetastore_list_clusters_result result; + try { + iface_->list_clusters(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.list_clusters"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("list_clusters", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.list_clusters"); + } + + oprot->writeMessageBegin("list_clusters", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.list_clusters", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_alter_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (eventHandler_.get() != NULL) { + ctx = eventHandler_->getContext("ThriftHiveMetastore.alter_cluster", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_cluster"); + + if (eventHandler_.get() != NULL) { + eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.alter_cluster"); + } + + ThriftHiveMetastore_alter_cluster_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_cluster", bytes); + } + + ThriftHiveMetastore_alter_cluster_result result; + try { + iface_->alter_cluster(args.cluster_name, args.cluster); + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (NoSuchObjectException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (AlreadyExistsException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (eventHandler_.get() != NULL) { + eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_cluster"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("alter_cluster", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (eventHandler_.get() != NULL) { + eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_cluster"); + } + + oprot->writeMessageBegin("alter_cluster", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (eventHandler_.get() != NULL) { + eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_cluster", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_create_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; Index: metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp =================================================================== --- 
metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp (revision 1235046) +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp (working copy) @@ -964,10 +964,10 @@ return xfer; } -const char* Database::ascii_fingerprint = "213967572143E49C9F1A23F7A866E2F5"; -const uint8_t Database::binary_fingerprint[16] = {0x21,0x39,0x67,0x57,0x21,0x43,0xE4,0x9C,0x9F,0x1A,0x23,0xF7,0xA8,0x66,0xE2,0xF5}; +const char* Cluster::ascii_fingerprint = "6971B678D1662B9E434D9D489082F97C"; +const uint8_t Cluster::binary_fingerprint[16] = {0x69,0x71,0xB6,0x78,0xD1,0x66,0x2B,0x9E,0x43,0x4D,0x9D,0x48,0x90,0x82,0xF9,0x7C}; -uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t Cluster::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -997,13 +997,21 @@ break; case 2: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->description); - this->__isset.description = true; + xfer += iprot->readString(this->comment); + this->__isset.comment = true; } else { xfer += iprot->skip(ftype); } break; case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->createTime); + this->__isset.createTime = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->locationUri); this->__isset.locationUri = true; @@ -1011,7 +1019,7 @@ xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); @@ -1034,7 +1042,243 @@ xfer += iprot->skip(ftype); } break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t Cluster::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("Cluster"); + xfer += oprot->writeFieldBegin("name", 
::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->comment); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32(this->createTime); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("locationUri", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->locationUri); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); + std::map ::const_iterator _iter70; + for (_iter70 = this->parameters.begin(); _iter70 != this->parameters.end(); ++_iter70) + { + xfer += oprot->writeString(_iter70->first); + xfer += oprot->writeString(_iter70->second); + } + xfer += oprot->writeMapEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +const char* ClusterStorageDescriptor::ascii_fingerprint = "F831BE62975A31F6DF2D936BBB5998A3"; +const uint8_t ClusterStorageDescriptor::binary_fingerprint[16] = {0xF8,0x31,0xBE,0x62,0x97,0x5A,0x31,0xF6,0xDF,0x2D,0x93,0x6B,0xBB,0x59,0x98,0xA3}; + +uint32_t ClusterStorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->cluster.read(iprot); + this->__isset.cluster = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->location); + this->__isset.location = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->primary); + this->__isset.primary = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->dataSynced); + this->__isset.dataSynced = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 5: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->parameters.clear(); + uint32_t _size71; + ::apache::thrift::protocol::TType _ktype72; + ::apache::thrift::protocol::TType _vtype73; + iprot->readMapBegin(_ktype72, _vtype73, _size71); + uint32_t _i75; + for (_i75 = 0; _i75 < _size71; ++_i75) + { + std::string _key76; + xfer += iprot->readString(_key76); + std::string& _val77 = this->parameters[_key76]; + xfer += iprot->readString(_val77); + } + iprot->readMapEnd(); + } + this->__isset.parameters = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ClusterStorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ClusterStorageDescriptor"); + xfer += oprot->writeFieldBegin("cluster", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->cluster.write(oprot); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("location", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->location); + xfer += oprot->writeFieldEnd(); + xfer += 
oprot->writeFieldBegin("primary", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->primary); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("dataSynced", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->dataSynced); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); + std::map ::const_iterator _iter78; + for (_iter78 = this->parameters.begin(); _iter78 != this->parameters.end(); ++_iter78) + { + xfer += oprot->writeString(_iter78->first); + xfer += oprot->writeString(_iter78->second); + } + xfer += oprot->writeMapEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +const char* Database::ascii_fingerprint = "213967572143E49C9F1A23F7A866E2F5"; +const uint8_t Database::binary_fingerprint[16] = {0x21,0x39,0x67,0x57,0x21,0x43,0xE4,0x9C,0x9F,0x1A,0x23,0xF7,0xA8,0x66,0xE2,0xF5}; + +uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->description); + this->__isset.description = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if 
(ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->locationUri); + this->__isset.locationUri = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->parameters.clear(); + uint32_t _size79; + ::apache::thrift::protocol::TType _ktype80; + ::apache::thrift::protocol::TType _vtype81; + iprot->readMapBegin(_ktype80, _vtype81, _size79); + uint32_t _i83; + for (_i83 = 0; _i83 < _size79; ++_i83) + { + std::string _key84; + xfer += iprot->readString(_key84); + std::string& _val85 = this->parameters[_key84]; + xfer += iprot->readString(_val85); + } + iprot->readMapEnd(); + } + this->__isset.parameters = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->privileges.read(iprot); this->__isset.privileges = true; @@ -1069,11 +1313,11 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter70; - for (_iter70 = this->parameters.begin(); _iter70 != this->parameters.end(); ++_iter70) + std::map ::const_iterator _iter86; + for (_iter86 = this->parameters.begin(); _iter86 != this->parameters.end(); ++_iter86) { - xfer += oprot->writeString(_iter70->first); - xfer += oprot->writeString(_iter70->second); + xfer += oprot->writeString(_iter86->first); + xfer += oprot->writeString(_iter86->second); } xfer += oprot->writeMapEnd(); } @@ -1131,17 +1375,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size71; - ::apache::thrift::protocol::TType _ktype72; - ::apache::thrift::protocol::TType _vtype73; - iprot->readMapBegin(_ktype72, _vtype73, _size71); - uint32_t _i75; - for (_i75 = 0; _i75 < _size71; ++_i75) + uint32_t _size87; + 
::apache::thrift::protocol::TType _ktype88; + ::apache::thrift::protocol::TType _vtype89; + iprot->readMapBegin(_ktype88, _vtype89, _size87); + uint32_t _i91; + for (_i91 = 0; _i91 < _size87; ++_i91) { - std::string _key76; - xfer += iprot->readString(_key76); - std::string& _val77 = this->parameters[_key76]; - xfer += iprot->readString(_val77); + std::string _key92; + xfer += iprot->readString(_key92); + std::string& _val93 = this->parameters[_key92]; + xfer += iprot->readString(_val93); } iprot->readMapEnd(); } @@ -1174,11 +1418,11 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter78; - for (_iter78 = this->parameters.begin(); _iter78 != this->parameters.end(); ++_iter78) + std::map ::const_iterator _iter94; + for (_iter94 = this->parameters.begin(); _iter94 != this->parameters.end(); ++_iter94) { - xfer += oprot->writeString(_iter78->first); - xfer += oprot->writeString(_iter78->second); + xfer += oprot->writeString(_iter94->first); + xfer += oprot->writeString(_iter94->second); } xfer += oprot->writeMapEnd(); } @@ -1253,8 +1497,8 @@ return xfer; } -const char* StorageDescriptor::ascii_fingerprint = "11E4CE18F895C13812C853DFDCD1293F"; -const uint8_t StorageDescriptor::binary_fingerprint[16] = {0x11,0xE4,0xCE,0x18,0xF8,0x95,0xC1,0x38,0x12,0xC8,0x53,0xDF,0xDC,0xD1,0x29,0x3F}; +const char* StorageDescriptor::ascii_fingerprint = "6E2134C82A6349BD16FC74AAA1974949"; +const uint8_t StorageDescriptor::binary_fingerprint[16] = {0x6E,0x21,0x34,0xC8,0x2A,0x63,0x49,0xBD,0x16,0xFC,0x74,0xAA,0xA1,0x97,0x49,0x49}; uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -1280,14 +1524,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size79; - ::apache::thrift::protocol::TType _etype82; - 
iprot->readListBegin(_etype82, _size79); - this->cols.resize(_size79); - uint32_t _i83; - for (_i83 = 0; _i83 < _size79; ++_i83) + uint32_t _size95; + ::apache::thrift::protocol::TType _etype98; + iprot->readListBegin(_etype98, _size95); + this->cols.resize(_size95); + uint32_t _i99; + for (_i99 = 0; _i99 < _size95; ++_i99) { - xfer += this->cols[_i83].read(iprot); + xfer += this->cols[_i99].read(iprot); } iprot->readListEnd(); } @@ -1348,14 +1592,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->bucketCols.clear(); - uint32_t _size84; - ::apache::thrift::protocol::TType _etype87; - iprot->readListBegin(_etype87, _size84); - this->bucketCols.resize(_size84); - uint32_t _i88; - for (_i88 = 0; _i88 < _size84; ++_i88) + uint32_t _size100; + ::apache::thrift::protocol::TType _etype103; + iprot->readListBegin(_etype103, _size100); + this->bucketCols.resize(_size100); + uint32_t _i104; + for (_i104 = 0; _i104 < _size100; ++_i104) { - xfer += iprot->readString(this->bucketCols[_i88]); + xfer += iprot->readString(this->bucketCols[_i104]); } iprot->readListEnd(); } @@ -1368,14 +1612,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->sortCols.clear(); - uint32_t _size89; - ::apache::thrift::protocol::TType _etype92; - iprot->readListBegin(_etype92, _size89); - this->sortCols.resize(_size89); - uint32_t _i93; - for (_i93 = 0; _i93 < _size89; ++_i93) + uint32_t _size105; + ::apache::thrift::protocol::TType _etype108; + iprot->readListBegin(_etype108, _size105); + this->sortCols.resize(_size105); + uint32_t _i109; + for (_i109 = 0; _i109 < _size105; ++_i109) { - xfer += this->sortCols[_i93].read(iprot); + xfer += this->sortCols[_i109].read(iprot); } iprot->readListEnd(); } @@ -1388,17 +1632,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size94; - ::apache::thrift::protocol::TType _ktype95; - ::apache::thrift::protocol::TType _vtype96; - iprot->readMapBegin(_ktype95, _vtype96, _size94); - uint32_t 
_i98; - for (_i98 = 0; _i98 < _size94; ++_i98) + uint32_t _size110; + ::apache::thrift::protocol::TType _ktype111; + ::apache::thrift::protocol::TType _vtype112; + iprot->readMapBegin(_ktype111, _vtype112, _size110); + uint32_t _i114; + for (_i114 = 0; _i114 < _size110; ++_i114) { - std::string _key99; - xfer += iprot->readString(_key99); - std::string& _val100 = this->parameters[_key99]; - xfer += iprot->readString(_val100); + std::string _key115; + xfer += iprot->readString(_key115); + std::string& _val116 = this->parameters[_key115]; + xfer += iprot->readString(_val116); } iprot->readMapEnd(); } @@ -1407,6 +1651,26 @@ xfer += iprot->skip(ftype); } break; + case 11: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->clusterStorage.clear(); + uint32_t _size117; + ::apache::thrift::protocol::TType _etype120; + iprot->readListBegin(_etype120, _size117); + this->clusterStorage.resize(_size117); + uint32_t _i121; + for (_i121 = 0; _i121 < _size117; ++_i121) + { + xfer += this->clusterStorage[_i121].read(iprot); + } + iprot->readListEnd(); + } + this->__isset.clusterStorage = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1425,10 +1689,10 @@ xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->cols.size())); - std::vector ::const_iterator _iter101; - for (_iter101 = this->cols.begin(); _iter101 != this->cols.end(); ++_iter101) + std::vector ::const_iterator _iter122; + for (_iter122 = this->cols.begin(); _iter122 != this->cols.end(); ++_iter122) { - xfer += (*_iter101).write(oprot); + xfer += (*_iter122).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1454,10 +1718,10 @@ xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->bucketCols.size())); - std::vector 
::const_iterator _iter102; - for (_iter102 = this->bucketCols.begin(); _iter102 != this->bucketCols.end(); ++_iter102) + std::vector ::const_iterator _iter123; + for (_iter123 = this->bucketCols.begin(); _iter123 != this->bucketCols.end(); ++_iter123) { - xfer += oprot->writeString((*_iter102)); + xfer += oprot->writeString((*_iter123)); } xfer += oprot->writeListEnd(); } @@ -1465,10 +1729,10 @@ xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->sortCols.size())); - std::vector ::const_iterator _iter103; - for (_iter103 = this->sortCols.begin(); _iter103 != this->sortCols.end(); ++_iter103) + std::vector ::const_iterator _iter124; + for (_iter124 = this->sortCols.begin(); _iter124 != this->sortCols.end(); ++_iter124) { - xfer += (*_iter103).write(oprot); + xfer += (*_iter124).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1476,22 +1740,35 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter104; - for (_iter104 = this->parameters.begin(); _iter104 != this->parameters.end(); ++_iter104) + std::map ::const_iterator _iter125; + for (_iter125 = this->parameters.begin(); _iter125 != this->parameters.end(); ++_iter125) { - xfer += oprot->writeString(_iter104->first); - xfer += oprot->writeString(_iter104->second); + xfer += oprot->writeString(_iter125->first); + xfer += oprot->writeString(_iter125->second); } xfer += oprot->writeMapEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.clusterStorage) { + xfer += oprot->writeFieldBegin("clusterStorage", ::apache::thrift::protocol::T_LIST, 11); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->clusterStorage.size())); + std::vector 
::const_iterator _iter126; + for (_iter126 = this->clusterStorage.begin(); _iter126 != this->clusterStorage.end(); ++_iter126) + { + xfer += (*_iter126).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -const char* Table::ascii_fingerprint = "26BE788C09746068A2616712C9262900"; -const uint8_t Table::binary_fingerprint[16] = {0x26,0xBE,0x78,0x8C,0x09,0x74,0x60,0x68,0xA2,0x61,0x67,0x12,0xC9,0x26,0x29,0x00}; +const char* Table::ascii_fingerprint = "B95423545849411EAB96918BDFBF8479"; +const uint8_t Table::binary_fingerprint[16] = {0xB9,0x54,0x23,0x54,0x58,0x49,0x41,0x1E,0xAB,0x96,0x91,0x8B,0xDF,0xBF,0x84,0x79}; uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -1573,14 +1850,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size105; - ::apache::thrift::protocol::TType _etype108; - iprot->readListBegin(_etype108, _size105); - this->partitionKeys.resize(_size105); - uint32_t _i109; - for (_i109 = 0; _i109 < _size105; ++_i109) + uint32_t _size127; + ::apache::thrift::protocol::TType _etype130; + iprot->readListBegin(_etype130, _size127); + this->partitionKeys.resize(_size127); + uint32_t _i131; + for (_i131 = 0; _i131 < _size127; ++_i131) { - xfer += this->partitionKeys[_i109].read(iprot); + xfer += this->partitionKeys[_i131].read(iprot); } iprot->readListEnd(); } @@ -1593,17 +1870,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size110; - ::apache::thrift::protocol::TType _ktype111; - ::apache::thrift::protocol::TType _vtype112; - iprot->readMapBegin(_ktype111, _vtype112, _size110); - uint32_t _i114; - for (_i114 = 0; _i114 < _size110; ++_i114) + uint32_t _size132; + ::apache::thrift::protocol::TType _ktype133; + ::apache::thrift::protocol::TType _vtype134; + iprot->readMapBegin(_ktype133, _vtype134, _size132); + uint32_t _i136; + 
for (_i136 = 0; _i136 < _size132; ++_i136) { - std::string _key115; - xfer += iprot->readString(_key115); - std::string& _val116 = this->parameters[_key115]; - xfer += iprot->readString(_val116); + std::string _key137; + xfer += iprot->readString(_key137); + std::string& _val138 = this->parameters[_key137]; + xfer += iprot->readString(_val138); } iprot->readMapEnd(); } @@ -1683,10 +1960,10 @@ xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter117; - for (_iter117 = this->partitionKeys.begin(); _iter117 != this->partitionKeys.end(); ++_iter117) + std::vector ::const_iterator _iter139; + for (_iter139 = this->partitionKeys.begin(); _iter139 != this->partitionKeys.end(); ++_iter139) { - xfer += (*_iter117).write(oprot); + xfer += (*_iter139).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1694,11 +1971,11 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter118; - for (_iter118 = this->parameters.begin(); _iter118 != this->parameters.end(); ++_iter118) + std::map ::const_iterator _iter140; + for (_iter140 = this->parameters.begin(); _iter140 != this->parameters.end(); ++_iter140) { - xfer += oprot->writeString(_iter118->first); - xfer += oprot->writeString(_iter118->second); + xfer += oprot->writeString(_iter140->first); + xfer += oprot->writeString(_iter140->second); } xfer += oprot->writeMapEnd(); } @@ -1722,8 +1999,8 @@ return xfer; } -const char* Partition::ascii_fingerprint = "F480E1D1B8AEBDDB37F8E180C0F07395"; -const uint8_t Partition::binary_fingerprint[16] = {0xF4,0x80,0xE1,0xD1,0xB8,0xAE,0xBD,0xDB,0x37,0xF8,0xE1,0x80,0xC0,0xF0,0x73,0x95}; +const char* 
Partition::ascii_fingerprint = "8108795CC9429AC2FE53E4749B040EF8"; +const uint8_t Partition::binary_fingerprint[16] = {0x81,0x08,0x79,0x5C,0xC9,0x42,0x9A,0xC2,0xFE,0x53,0xE4,0x74,0x9B,0x04,0x0E,0xF8}; uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -1749,14 +2026,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size119; - ::apache::thrift::protocol::TType _etype122; - iprot->readListBegin(_etype122, _size119); - this->values.resize(_size119); - uint32_t _i123; - for (_i123 = 0; _i123 < _size119; ++_i123) + uint32_t _size141; + ::apache::thrift::protocol::TType _etype144; + iprot->readListBegin(_etype144, _size141); + this->values.resize(_size141); + uint32_t _i145; + for (_i145 = 0; _i145 < _size141; ++_i145) { - xfer += iprot->readString(this->values[_i123]); + xfer += iprot->readString(this->values[_i145]); } iprot->readListEnd(); } @@ -1809,17 +2086,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size124; - ::apache::thrift::protocol::TType _ktype125; - ::apache::thrift::protocol::TType _vtype126; - iprot->readMapBegin(_ktype125, _vtype126, _size124); - uint32_t _i128; - for (_i128 = 0; _i128 < _size124; ++_i128) + uint32_t _size146; + ::apache::thrift::protocol::TType _ktype147; + ::apache::thrift::protocol::TType _vtype148; + iprot->readMapBegin(_ktype147, _vtype148, _size146); + uint32_t _i150; + for (_i150 = 0; _i150 < _size146; ++_i150) { - std::string _key129; - xfer += iprot->readString(_key129); - std::string& _val130 = this->parameters[_key129]; - xfer += iprot->readString(_val130); + std::string _key151; + xfer += iprot->readString(_key151); + std::string& _val152 = this->parameters[_key151]; + xfer += iprot->readString(_val152); } iprot->readMapEnd(); } @@ -1854,10 +2131,10 @@ xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->values.size())); - std::vector ::const_iterator _iter131; - for (_iter131 = this->values.begin(); _iter131 != this->values.end(); ++_iter131) + std::vector ::const_iterator _iter153; + for (_iter153 = this->values.begin(); _iter153 != this->values.end(); ++_iter153) { - xfer += oprot->writeString((*_iter131)); + xfer += oprot->writeString((*_iter153)); } xfer += oprot->writeListEnd(); } @@ -1880,11 +2157,11 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter132; - for (_iter132 = this->parameters.begin(); _iter132 != this->parameters.end(); ++_iter132) + std::map ::const_iterator _iter154; + for (_iter154 = this->parameters.begin(); _iter154 != this->parameters.end(); ++_iter154) { - xfer += oprot->writeString(_iter132->first); - xfer += oprot->writeString(_iter132->second); + xfer += oprot->writeString(_iter154->first); + xfer += oprot->writeString(_iter154->second); } xfer += oprot->writeMapEnd(); } @@ -1899,8 +2176,8 @@ return xfer; } -const char* Index::ascii_fingerprint = "5FEE4F7E28935B644F207D74459F6A29"; -const uint8_t Index::binary_fingerprint[16] = {0x5F,0xEE,0x4F,0x7E,0x28,0x93,0x5B,0x64,0x4F,0x20,0x7D,0x74,0x45,0x9F,0x6A,0x29}; +const char* Index::ascii_fingerprint = "45A49EB1C8E638AF0A68D3368EDB1E88"; +const uint8_t Index::binary_fingerprint[16] = {0x45,0xA4,0x9E,0xB1,0xC8,0xE6,0x38,0xAF,0x0A,0x68,0xD3,0x36,0x8E,0xDB,0x1E,0x88}; uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -1990,17 +2267,17 @@ if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size133; - ::apache::thrift::protocol::TType _ktype134; - ::apache::thrift::protocol::TType _vtype135; - iprot->readMapBegin(_ktype134, _vtype135, _size133); - uint32_t _i137; - for (_i137 = 0; _i137 < _size133; 
++_i137) + uint32_t _size155; + ::apache::thrift::protocol::TType _ktype156; + ::apache::thrift::protocol::TType _vtype157; + iprot->readMapBegin(_ktype156, _vtype157, _size155); + uint32_t _i159; + for (_i159 = 0; _i159 < _size155; ++_i159) { - std::string _key138; - xfer += iprot->readString(_key138); - std::string& _val139 = this->parameters[_key138]; - xfer += iprot->readString(_val139); + std::string _key160; + xfer += iprot->readString(_key160); + std::string& _val161 = this->parameters[_key160]; + xfer += iprot->readString(_val161); } iprot->readMapEnd(); } @@ -2059,11 +2336,11 @@ xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter140; - for (_iter140 = this->parameters.begin(); _iter140 != this->parameters.end(); ++_iter140) + std::map ::const_iterator _iter162; + for (_iter162 = this->parameters.begin(); _iter162 != this->parameters.end(); ++_iter162) { - xfer += oprot->writeString(_iter140->first); - xfer += oprot->writeString(_iter140->second); + xfer += oprot->writeString(_iter162->first); + xfer += oprot->writeString(_iter162->second); } xfer += oprot->writeMapEnd(); } @@ -2103,14 +2380,14 @@ if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size141; - ::apache::thrift::protocol::TType _etype144; - iprot->readListBegin(_etype144, _size141); - this->fieldSchemas.resize(_size141); - uint32_t _i145; - for (_i145 = 0; _i145 < _size141; ++_i145) + uint32_t _size163; + ::apache::thrift::protocol::TType _etype166; + iprot->readListBegin(_etype166, _size163); + this->fieldSchemas.resize(_size163); + uint32_t _i167; + for (_i167 = 0; _i167 < _size163; ++_i167) { - xfer += this->fieldSchemas[_i145].read(iprot); + xfer += this->fieldSchemas[_i167].read(iprot); } iprot->readListEnd(); } @@ -2123,17 +2400,17 @@ if 
(ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size146; - ::apache::thrift::protocol::TType _ktype147; - ::apache::thrift::protocol::TType _vtype148; - iprot->readMapBegin(_ktype147, _vtype148, _size146); - uint32_t _i150; - for (_i150 = 0; _i150 < _size146; ++_i150) + uint32_t _size168; + ::apache::thrift::protocol::TType _ktype169; + ::apache::thrift::protocol::TType _vtype170; + iprot->readMapBegin(_ktype169, _vtype170, _size168); + uint32_t _i172; + for (_i172 = 0; _i172 < _size168; ++_i172) { - std::string _key151; - xfer += iprot->readString(_key151); - std::string& _val152 = this->properties[_key151]; - xfer += iprot->readString(_val152); + std::string _key173; + xfer += iprot->readString(_key173); + std::string& _val174 = this->properties[_key173]; + xfer += iprot->readString(_val174); } iprot->readMapEnd(); } @@ -2160,10 +2437,10 @@ xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter153; - for (_iter153 = this->fieldSchemas.begin(); _iter153 != this->fieldSchemas.end(); ++_iter153) + std::vector ::const_iterator _iter175; + for (_iter175 = this->fieldSchemas.begin(); _iter175 != this->fieldSchemas.end(); ++_iter175) { - xfer += (*_iter153).write(oprot); + xfer += (*_iter175).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2171,11 +2448,11 @@ xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter154; - for (_iter154 = this->properties.begin(); _iter154 != this->properties.end(); ++_iter154) + std::map ::const_iterator _iter176; + for (_iter176 = this->properties.begin(); _iter176 != this->properties.end(); ++_iter176) { - xfer 
+= oprot->writeString(_iter154->first); - xfer += oprot->writeString(_iter154->second); + xfer += oprot->writeString(_iter176->first); + xfer += oprot->writeString(_iter176->second); } xfer += oprot->writeMapEnd(); } Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h =================================================================== --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (revision 1235046) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h (working copy) @@ -29,6 +29,7 @@ virtual void get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) = 0; virtual void create_table(const Table& tbl) = 0; virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0; + virtual void drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData) = 0; virtual void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) = 0; virtual void get_all_tables(std::vector & _return, const std::string& db_name) = 0; virtual void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) = 0; @@ -40,6 +41,7 @@ virtual void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) = 0; virtual void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0; virtual bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) = 0; + virtual bool drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) = 0; virtual bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const 
bool deleteData) = 0; virtual void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) = 0; virtual void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) = 0; @@ -65,6 +67,11 @@ virtual void get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) = 0; virtual void get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) = 0; virtual void get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) = 0; + virtual void create_cluster(const Cluster& cluster) = 0; + virtual void drop_cluster(const std::string& cluster_name) = 0; + virtual void get_cluster(Cluster& _return, const std::string& cluster_name) = 0; + virtual void list_clusters(std::vector & _return) = 0; + virtual void alter_cluster(const std::string& cluster_name, const Cluster& cluster) = 0; virtual bool create_role(const Role& role) = 0; virtual bool drop_role(const std::string& role_name) = 0; virtual void get_role_names(std::vector & _return) = 0; @@ -128,6 +135,9 @@ void drop_table(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */) { return; } + void drop_table_on_cluster(const std::string& /* cluster_name */, const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */) { + return; + } void get_tables(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */) { return; } @@ -163,6 +173,10 @@ bool _return = false; return _return; } + bool drop_partition_on_cluster(const std::string& /* cluster_name */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* 
part_vals */, const bool /* deleteData */) { + bool _return = false; + return _return; + } bool drop_partition_by_name(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const bool /* deleteData */) { bool _return = false; return _return; @@ -241,6 +255,21 @@ void get_index_names(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_indexes */) { return; } + void create_cluster(const Cluster& /* cluster */) { + return; + } + void drop_cluster(const std::string& /* cluster_name */) { + return; + } + void get_cluster(Cluster& /* _return */, const std::string& /* cluster_name */) { + return; + } + void list_clusters(std::vector & /* _return */) { + return; + } + void alter_cluster(const std::string& /* cluster_name */, const Cluster& /* cluster */) { + return; + } bool create_role(const Role& /* role */) { bool _return = false; return _return; @@ -2123,6 +2152,161 @@ }; +typedef struct _ThriftHiveMetastore_drop_table_on_cluster_args__isset { + _ThriftHiveMetastore_drop_table_on_cluster_args__isset() : cluster_name(false), dbname(false), name(false), deleteData(false) {} + bool cluster_name; + bool dbname; + bool name; + bool deleteData; +} _ThriftHiveMetastore_drop_table_on_cluster_args__isset; + +class ThriftHiveMetastore_drop_table_on_cluster_args { + public: + + ThriftHiveMetastore_drop_table_on_cluster_args() : cluster_name(""), dbname(""), name(""), deleteData(0) { + } + + virtual ~ThriftHiveMetastore_drop_table_on_cluster_args() throw() {} + + std::string cluster_name; + std::string dbname; + std::string name; + bool deleteData; + + _ThriftHiveMetastore_drop_table_on_cluster_args__isset __isset; + + void __set_cluster_name(const std::string& val) { + cluster_name = val; + } + + void __set_dbname(const std::string& val) { + dbname = val; + } + + void __set_name(const std::string& val) { + name = val; + } + + void __set_deleteData(const bool val) 
{ + deleteData = val; + } + + bool operator == (const ThriftHiveMetastore_drop_table_on_cluster_args & rhs) const + { + if (!(cluster_name == rhs.cluster_name)) + return false; + if (!(dbname == rhs.dbname)) + return false; + if (!(name == rhs.name)) + return false; + if (!(deleteData == rhs.deleteData)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_table_on_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_table_on_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_table_on_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_table_on_cluster_pargs() throw() {} + + const std::string* cluster_name; + const std::string* dbname; + const std::string* name; + const bool* deleteData; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_table_on_cluster_result__isset { + _ThriftHiveMetastore_drop_table_on_cluster_result__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_table_on_cluster_result__isset; + +class ThriftHiveMetastore_drop_table_on_cluster_result { + public: + + ThriftHiveMetastore_drop_table_on_cluster_result() { + } + + virtual ~ThriftHiveMetastore_drop_table_on_cluster_result() throw() {} + + NoSuchObjectException o1; + MetaException o2; + InvalidOperationException o3; + + _ThriftHiveMetastore_drop_table_on_cluster_result__isset __isset; + + void __set_o1(const NoSuchObjectException& val) { + o1 = val; + } + + void __set_o2(const MetaException& val) { + o2 = val; + } + + void __set_o3(const InvalidOperationException& val) { + o3 = val; + } + + bool operator == (const ThriftHiveMetastore_drop_table_on_cluster_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if 
(!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_table_on_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_table_on_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_table_on_cluster_presult__isset { + _ThriftHiveMetastore_drop_table_on_cluster_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_table_on_cluster_presult__isset; + +class ThriftHiveMetastore_drop_table_on_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_table_on_cluster_presult() throw() {} + + NoSuchObjectException o1; + MetaException o2; + InvalidOperationException o3; + + _ThriftHiveMetastore_drop_table_on_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_get_tables_args__isset { _ThriftHiveMetastore_get_tables_args__isset() : db_name(false), pattern(false) {} bool db_name; @@ -3687,6 +3871,180 @@ }; +typedef struct _ThriftHiveMetastore_drop_partition_on_cluster_args__isset { + _ThriftHiveMetastore_drop_partition_on_cluster_args__isset() : cluster_name(false), db_name(false), tbl_name(false), part_vals(false), deleteData(false) {} + bool cluster_name; + bool db_name; + bool tbl_name; + bool part_vals; + bool deleteData; +} _ThriftHiveMetastore_drop_partition_on_cluster_args__isset; + +class ThriftHiveMetastore_drop_partition_on_cluster_args { + public: + + ThriftHiveMetastore_drop_partition_on_cluster_args() : cluster_name(""), db_name(""), tbl_name(""), deleteData(0) { + } + + virtual ~ThriftHiveMetastore_drop_partition_on_cluster_args() throw() {} + + std::string cluster_name; + std::string db_name; + std::string 
tbl_name; + std::vector part_vals; + bool deleteData; + + _ThriftHiveMetastore_drop_partition_on_cluster_args__isset __isset; + + void __set_cluster_name(const std::string& val) { + cluster_name = val; + } + + void __set_db_name(const std::string& val) { + db_name = val; + } + + void __set_tbl_name(const std::string& val) { + tbl_name = val; + } + + void __set_part_vals(const std::vector & val) { + part_vals = val; + } + + void __set_deleteData(const bool val) { + deleteData = val; + } + + bool operator == (const ThriftHiveMetastore_drop_partition_on_cluster_args & rhs) const + { + if (!(cluster_name == rhs.cluster_name)) + return false; + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + if (!(part_vals == rhs.part_vals)) + return false; + if (!(deleteData == rhs.deleteData)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_partition_on_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_partition_on_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_partition_on_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_partition_on_cluster_pargs() throw() {} + + const std::string* cluster_name; + const std::string* db_name; + const std::string* tbl_name; + const std::vector * part_vals; + const bool* deleteData; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_partition_on_cluster_result__isset { + _ThriftHiveMetastore_drop_partition_on_cluster_result__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success; + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_partition_on_cluster_result__isset; + +class ThriftHiveMetastore_drop_partition_on_cluster_result { + 
public: + + ThriftHiveMetastore_drop_partition_on_cluster_result() : success(0) { + } + + virtual ~ThriftHiveMetastore_drop_partition_on_cluster_result() throw() {} + + bool success; + NoSuchObjectException o1; + MetaException o2; + InvalidOperationException o3; + + _ThriftHiveMetastore_drop_partition_on_cluster_result__isset __isset; + + void __set_success(const bool val) { + success = val; + } + + void __set_o1(const NoSuchObjectException& val) { + o1 = val; + } + + void __set_o2(const MetaException& val) { + o2 = val; + } + + void __set_o3(const InvalidOperationException& val) { + o3 = val; + } + + bool operator == (const ThriftHiveMetastore_drop_partition_on_cluster_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_partition_on_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_partition_on_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_partition_on_cluster_presult__isset { + _ThriftHiveMetastore_drop_partition_on_cluster_presult__isset() : success(false), o1(false), o2(false), o3(false) {} + bool success; + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_partition_on_cluster_presult__isset; + +class ThriftHiveMetastore_drop_partition_on_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_partition_on_cluster_presult() throw() {} + + bool* success; + NoSuchObjectException o1; + MetaException o2; + InvalidOperationException o3; + + _ThriftHiveMetastore_drop_partition_on_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct 
_ThriftHiveMetastore_drop_partition_by_name_args__isset { _ThriftHiveMetastore_drop_partition_by_name_args__isset() : db_name(false), tbl_name(false), part_name(false), deleteData(false) {} bool db_name; @@ -7427,6 +7785,641 @@ }; +typedef struct _ThriftHiveMetastore_create_cluster_args__isset { + _ThriftHiveMetastore_create_cluster_args__isset() : cluster(false) {} + bool cluster; +} _ThriftHiveMetastore_create_cluster_args__isset; + +class ThriftHiveMetastore_create_cluster_args { + public: + + ThriftHiveMetastore_create_cluster_args() { + } + + virtual ~ThriftHiveMetastore_create_cluster_args() throw() {} + + Cluster cluster; + + _ThriftHiveMetastore_create_cluster_args__isset __isset; + + void __set_cluster(const Cluster& val) { + cluster = val; + } + + bool operator == (const ThriftHiveMetastore_create_cluster_args & rhs) const + { + if (!(cluster == rhs.cluster)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_cluster_pargs() throw() {} + + const Cluster* cluster; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_cluster_result__isset { + _ThriftHiveMetastore_create_cluster_result__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_create_cluster_result__isset; + +class ThriftHiveMetastore_create_cluster_result { + public: + + ThriftHiveMetastore_create_cluster_result() { + } + + virtual ~ThriftHiveMetastore_create_cluster_result() throw() {} + + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + 
+ _ThriftHiveMetastore_create_cluster_result__isset __isset; + + void __set_o1(const AlreadyExistsException& val) { + o1 = val; + } + + void __set_o2(const InvalidObjectException& val) { + o2 = val; + } + + void __set_o3(const MetaException& val) { + o3 = val; + } + + bool operator == (const ThriftHiveMetastore_create_cluster_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_cluster_presult__isset { + _ThriftHiveMetastore_create_cluster_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_create_cluster_presult__isset; + +class ThriftHiveMetastore_create_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_create_cluster_presult() throw() {} + + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + + _ThriftHiveMetastore_create_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_drop_cluster_args__isset { + _ThriftHiveMetastore_drop_cluster_args__isset() : cluster_name(false) {} + bool cluster_name; +} _ThriftHiveMetastore_drop_cluster_args__isset; + +class ThriftHiveMetastore_drop_cluster_args { + public: + + ThriftHiveMetastore_drop_cluster_args() : cluster_name("") { + } + + virtual ~ThriftHiveMetastore_drop_cluster_args() throw() {} + + std::string cluster_name; + + _ThriftHiveMetastore_drop_cluster_args__isset __isset; + + void __set_cluster_name(const std::string& val) { + cluster_name = val; + } + + 
bool operator == (const ThriftHiveMetastore_drop_cluster_args & rhs) const + { + if (!(cluster_name == rhs.cluster_name)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_cluster_pargs() throw() {} + + const std::string* cluster_name; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_cluster_result__isset { + _ThriftHiveMetastore_drop_cluster_result__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_cluster_result__isset; + +class ThriftHiveMetastore_drop_cluster_result { + public: + + ThriftHiveMetastore_drop_cluster_result() { + } + + virtual ~ThriftHiveMetastore_drop_cluster_result() throw() {} + + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_cluster_result__isset __isset; + + void __set_o1(const NoSuchObjectException& val) { + o1 = val; + } + + void __set_o2(const InvalidOperationException& val) { + o2 = val; + } + + void __set_o3(const MetaException& val) { + o3 = val; + } + + bool operator == (const ThriftHiveMetastore_drop_cluster_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t 
write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_cluster_presult__isset { + _ThriftHiveMetastore_drop_cluster_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1; + bool o2; + bool o3; +} _ThriftHiveMetastore_drop_cluster_presult__isset; + +class ThriftHiveMetastore_drop_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_cluster_presult() throw() {} + + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_get_cluster_args__isset { + _ThriftHiveMetastore_get_cluster_args__isset() : cluster_name(false) {} + bool cluster_name; +} _ThriftHiveMetastore_get_cluster_args__isset; + +class ThriftHiveMetastore_get_cluster_args { + public: + + ThriftHiveMetastore_get_cluster_args() : cluster_name("") { + } + + virtual ~ThriftHiveMetastore_get_cluster_args() throw() {} + + std::string cluster_name; + + _ThriftHiveMetastore_get_cluster_args__isset __isset; + + void __set_cluster_name(const std::string& val) { + cluster_name = val; + } + + bool operator == (const ThriftHiveMetastore_get_cluster_args & rhs) const + { + if (!(cluster_name == rhs.cluster_name)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_cluster_pargs() throw() {} + + const std::string* cluster_name; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct 
_ThriftHiveMetastore_get_cluster_result__isset { + _ThriftHiveMetastore_get_cluster_result__isset() : success(false), o1(false), o2(false) {} + bool success; + bool o1; + bool o2; +} _ThriftHiveMetastore_get_cluster_result__isset; + +class ThriftHiveMetastore_get_cluster_result { + public: + + ThriftHiveMetastore_get_cluster_result() { + } + + virtual ~ThriftHiveMetastore_get_cluster_result() throw() {} + + Cluster success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_cluster_result__isset __isset; + + void __set_success(const Cluster& val) { + success = val; + } + + void __set_o1(const MetaException& val) { + o1 = val; + } + + void __set_o2(const NoSuchObjectException& val) { + o2 = val; + } + + bool operator == (const ThriftHiveMetastore_get_cluster_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_cluster_presult__isset { + _ThriftHiveMetastore_get_cluster_presult__isset() : success(false), o1(false), o2(false) {} + bool success; + bool o1; + bool o2; +} _ThriftHiveMetastore_get_cluster_presult__isset; + +class ThriftHiveMetastore_get_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_get_cluster_presult() throw() {} + + Cluster* success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHiveMetastore_list_clusters_args { + public: + + ThriftHiveMetastore_list_clusters_args() { + } + + virtual 
~ThriftHiveMetastore_list_clusters_args() throw() {} + + + bool operator == (const ThriftHiveMetastore_list_clusters_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_list_clusters_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_list_clusters_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_list_clusters_pargs { + public: + + + virtual ~ThriftHiveMetastore_list_clusters_pargs() throw() {} + + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_list_clusters_result__isset { + _ThriftHiveMetastore_list_clusters_result__isset() : success(false), o1(false) {} + bool success; + bool o1; +} _ThriftHiveMetastore_list_clusters_result__isset; + +class ThriftHiveMetastore_list_clusters_result { + public: + + ThriftHiveMetastore_list_clusters_result() { + } + + virtual ~ThriftHiveMetastore_list_clusters_result() throw() {} + + std::vector success; + MetaException o1; + + _ThriftHiveMetastore_list_clusters_result__isset __isset; + + void __set_success(const std::vector & val) { + success = val; + } + + void __set_o1(const MetaException& val) { + o1 = val; + } + + bool operator == (const ThriftHiveMetastore_list_clusters_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_list_clusters_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_list_clusters_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_list_clusters_presult__isset { + 
_ThriftHiveMetastore_list_clusters_presult__isset() : success(false), o1(false) {} + bool success; + bool o1; +} _ThriftHiveMetastore_list_clusters_presult__isset; + +class ThriftHiveMetastore_list_clusters_presult { + public: + + + virtual ~ThriftHiveMetastore_list_clusters_presult() throw() {} + + std::vector * success; + MetaException o1; + + _ThriftHiveMetastore_list_clusters_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_alter_cluster_args__isset { + _ThriftHiveMetastore_alter_cluster_args__isset() : cluster_name(false), cluster(false) {} + bool cluster_name; + bool cluster; +} _ThriftHiveMetastore_alter_cluster_args__isset; + +class ThriftHiveMetastore_alter_cluster_args { + public: + + ThriftHiveMetastore_alter_cluster_args() : cluster_name("") { + } + + virtual ~ThriftHiveMetastore_alter_cluster_args() throw() {} + + std::string cluster_name; + Cluster cluster; + + _ThriftHiveMetastore_alter_cluster_args__isset __isset; + + void __set_cluster_name(const std::string& val) { + cluster_name = val; + } + + void __set_cluster(const Cluster& val) { + cluster = val; + } + + bool operator == (const ThriftHiveMetastore_alter_cluster_args & rhs) const + { + if (!(cluster_name == rhs.cluster_name)) + return false; + if (!(cluster == rhs.cluster)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_cluster_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_cluster_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_alter_cluster_pargs { + public: + + + virtual ~ThriftHiveMetastore_alter_cluster_pargs() throw() {} + + const std::string* cluster_name; + const Cluster* cluster; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef 
struct _ThriftHiveMetastore_alter_cluster_result__isset { + _ThriftHiveMetastore_alter_cluster_result__isset() : o1(false), o2(false), o3(false), o4(false) {} + bool o1; + bool o2; + bool o3; + bool o4; +} _ThriftHiveMetastore_alter_cluster_result__isset; + +class ThriftHiveMetastore_alter_cluster_result { + public: + + ThriftHiveMetastore_alter_cluster_result() { + } + + virtual ~ThriftHiveMetastore_alter_cluster_result() throw() {} + + MetaException o1; + InvalidObjectException o2; + NoSuchObjectException o3; + AlreadyExistsException o4; + + _ThriftHiveMetastore_alter_cluster_result__isset __isset; + + void __set_o1(const MetaException& val) { + o1 = val; + } + + void __set_o2(const InvalidObjectException& val) { + o2 = val; + } + + void __set_o3(const NoSuchObjectException& val) { + o3 = val; + } + + void __set_o4(const AlreadyExistsException& val) { + o4 = val; + } + + bool operator == (const ThriftHiveMetastore_alter_cluster_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + if (!(o4 == rhs.o4)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_cluster_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_cluster_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_alter_cluster_presult__isset { + _ThriftHiveMetastore_alter_cluster_presult__isset() : o1(false), o2(false), o3(false), o4(false) {} + bool o1; + bool o2; + bool o3; + bool o4; +} _ThriftHiveMetastore_alter_cluster_presult__isset; + +class ThriftHiveMetastore_alter_cluster_presult { + public: + + + virtual ~ThriftHiveMetastore_alter_cluster_presult() throw() {} + + MetaException o1; + InvalidObjectException o2; + NoSuchObjectException o3; + AlreadyExistsException o4; + + 
_ThriftHiveMetastore_alter_cluster_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_create_role_args__isset { _ThriftHiveMetastore_create_role_args__isset() : role(false) {} bool role; @@ -9235,6 +10228,9 @@ void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); void recv_drop_table(); + void drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData); + void send_drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData); + void recv_drop_table_on_cluster(); void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern); void send_get_tables(const std::string& db_name, const std::string& pattern); void recv_get_tables(std::vector & _return); @@ -9268,6 +10264,9 @@ bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); bool recv_drop_partition(); + bool drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + void send_drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData); + bool recv_drop_partition_on_cluster(); bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData); void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const 
std::string& part_name, const bool deleteData); bool recv_drop_partition_by_name(); @@ -9343,6 +10342,21 @@ void get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes); void send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes); void recv_get_index_names(std::vector & _return); + void create_cluster(const Cluster& cluster); + void send_create_cluster(const Cluster& cluster); + void recv_create_cluster(); + void drop_cluster(const std::string& cluster_name); + void send_drop_cluster(const std::string& cluster_name); + void recv_drop_cluster(); + void get_cluster(Cluster& _return, const std::string& cluster_name); + void send_get_cluster(const std::string& cluster_name); + void recv_get_cluster(Cluster& _return); + void list_clusters(std::vector & _return); + void send_list_clusters(); + void recv_list_clusters(std::vector & _return); + void alter_cluster(const std::string& cluster_name, const Cluster& cluster); + void send_alter_cluster(const std::string& cluster_name, const Cluster& cluster); + void recv_alter_cluster(); bool create_role(const Role& role); void send_create_role(const Role& role); bool recv_create_role(); @@ -9407,6 +10421,7 @@ void process_get_schema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_create_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_table_on_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_all_tables(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -9418,6 +10433,7 @@ void process_append_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_append_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_partition_on_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_partition_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_partition_with_auth(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -9443,6 +10459,11 @@ void process_get_index_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_indexes(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_index_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* 
callContext); + void process_create_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_list_clusters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_alter_cluster(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_create_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_role(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_role_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -9475,6 +10496,7 @@ processMap_["get_schema"] = &ThriftHiveMetastoreProcessor::process_get_schema; processMap_["create_table"] = &ThriftHiveMetastoreProcessor::process_create_table; processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table; + processMap_["drop_table_on_cluster"] = &ThriftHiveMetastoreProcessor::process_drop_table_on_cluster; processMap_["get_tables"] = &ThriftHiveMetastoreProcessor::process_get_tables; processMap_["get_all_tables"] = &ThriftHiveMetastoreProcessor::process_get_all_tables; processMap_["get_table"] = &ThriftHiveMetastoreProcessor::process_get_table; @@ -9486,6 +10508,7 @@ processMap_["append_partition"] = &ThriftHiveMetastoreProcessor::process_append_partition; 
processMap_["append_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_append_partition_by_name; processMap_["drop_partition"] = &ThriftHiveMetastoreProcessor::process_drop_partition; + processMap_["drop_partition_on_cluster"] = &ThriftHiveMetastoreProcessor::process_drop_partition_on_cluster; processMap_["drop_partition_by_name"] = &ThriftHiveMetastoreProcessor::process_drop_partition_by_name; processMap_["get_partition"] = &ThriftHiveMetastoreProcessor::process_get_partition; processMap_["get_partition_with_auth"] = &ThriftHiveMetastoreProcessor::process_get_partition_with_auth; @@ -9511,6 +10534,11 @@ processMap_["get_index_by_name"] = &ThriftHiveMetastoreProcessor::process_get_index_by_name; processMap_["get_indexes"] = &ThriftHiveMetastoreProcessor::process_get_indexes; processMap_["get_index_names"] = &ThriftHiveMetastoreProcessor::process_get_index_names; + processMap_["create_cluster"] = &ThriftHiveMetastoreProcessor::process_create_cluster; + processMap_["drop_cluster"] = &ThriftHiveMetastoreProcessor::process_drop_cluster; + processMap_["get_cluster"] = &ThriftHiveMetastoreProcessor::process_get_cluster; + processMap_["list_clusters"] = &ThriftHiveMetastoreProcessor::process_list_clusters; + processMap_["alter_cluster"] = &ThriftHiveMetastoreProcessor::process_alter_cluster; processMap_["create_role"] = &ThriftHiveMetastoreProcessor::process_create_role; processMap_["drop_role"] = &ThriftHiveMetastoreProcessor::process_drop_role; processMap_["get_role_names"] = &ThriftHiveMetastoreProcessor::process_get_role_names; @@ -9689,6 +10717,13 @@ } } + void drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + ifaces_[i]->drop_table_on_cluster(cluster_name, dbname, name, deleteData); + } + } + void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { size_t sz = 
ifaces_.size(); for (size_t i = 0; i < sz; ++i) { @@ -9814,6 +10849,17 @@ } } + bool drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + return ifaces_[i]->drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData); + } else { + ifaces_[i]->drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData); + } + } + } + bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { size_t sz = ifaces_.size(); for (size_t i = 0; i < sz; ++i) { @@ -10091,6 +11137,51 @@ } } + void create_cluster(const Cluster& cluster) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + ifaces_[i]->create_cluster(cluster); + } + } + + void drop_cluster(const std::string& cluster_name) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + ifaces_[i]->drop_cluster(cluster_name); + } + } + + void get_cluster(Cluster& _return, const std::string& cluster_name) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + ifaces_[i]->get_cluster(_return, cluster_name); + return; + } else { + ifaces_[i]->get_cluster(_return, cluster_name); + } + } + } + + void list_clusters(std::vector & _return) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + if (i == sz - 1) { + ifaces_[i]->list_clusters(_return); + return; + } else { + ifaces_[i]->list_clusters(_return); + } + } + } + + void alter_cluster(const std::string& cluster_name, const Cluster& cluster) { + size_t sz = ifaces_.size(); + for (size_t i = 0; i < sz; ++i) { + ifaces_[i]->alter_cluster(cluster_name, cluster); + } + } + bool create_role(const Role& role) { size_t sz = ifaces_.size(); for (size_t i = 0; i < sz; ++i) { Index: 
metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h =================================================================== --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h (revision 1235046) +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h (working copy) @@ -592,6 +592,152 @@ }; +typedef struct _Cluster__isset { + _Cluster__isset() : name(false), comment(false), createTime(false), locationUri(false), parameters(false) {} + bool name; + bool comment; + bool createTime; + bool locationUri; + bool parameters; +} _Cluster__isset; + +class Cluster { + public: + + static const char* ascii_fingerprint; // = "6971B678D1662B9E434D9D489082F97C"; + static const uint8_t binary_fingerprint[16]; // = {0x69,0x71,0xB6,0x78,0xD1,0x66,0x2B,0x9E,0x43,0x4D,0x9D,0x48,0x90,0x82,0xF9,0x7C}; + + Cluster() : name(""), comment(""), createTime(0), locationUri("") { + } + + virtual ~Cluster() throw() {} + + std::string name; + std::string comment; + int32_t createTime; + std::string locationUri; + std::map parameters; + + _Cluster__isset __isset; + + void __set_name(const std::string& val) { + name = val; + } + + void __set_comment(const std::string& val) { + comment = val; + } + + void __set_createTime(const int32_t val) { + createTime = val; + } + + void __set_locationUri(const std::string& val) { + locationUri = val; + } + + void __set_parameters(const std::map & val) { + parameters = val; + } + + bool operator == (const Cluster & rhs) const + { + if (!(name == rhs.name)) + return false; + if (!(comment == rhs.comment)) + return false; + if (!(createTime == rhs.createTime)) + return false; + if (!(locationUri == rhs.locationUri)) + return false; + if (!(parameters == rhs.parameters)) + return false; + return true; + } + bool operator != (const Cluster &rhs) const { + return !(*this == rhs); + } + + bool operator < (const Cluster & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) 
const; + +}; + +typedef struct _ClusterStorageDescriptor__isset { + _ClusterStorageDescriptor__isset() : cluster(false), location(false), primary(false), dataSynced(false), parameters(false) {} + bool cluster; + bool location; + bool primary; + bool dataSynced; + bool parameters; +} _ClusterStorageDescriptor__isset; + +class ClusterStorageDescriptor { + public: + + static const char* ascii_fingerprint; // = "F831BE62975A31F6DF2D936BBB5998A3"; + static const uint8_t binary_fingerprint[16]; // = {0xF8,0x31,0xBE,0x62,0x97,0x5A,0x31,0xF6,0xDF,0x2D,0x93,0x6B,0xBB,0x59,0x98,0xA3}; + + ClusterStorageDescriptor() : location(""), primary(0), dataSynced(0) { + } + + virtual ~ClusterStorageDescriptor() throw() {} + + Cluster cluster; + std::string location; + bool primary; + bool dataSynced; + std::map parameters; + + _ClusterStorageDescriptor__isset __isset; + + void __set_cluster(const Cluster& val) { + cluster = val; + } + + void __set_location(const std::string& val) { + location = val; + } + + void __set_primary(const bool val) { + primary = val; + } + + void __set_dataSynced(const bool val) { + dataSynced = val; + } + + void __set_parameters(const std::map & val) { + parameters = val; + } + + bool operator == (const ClusterStorageDescriptor & rhs) const + { + if (!(cluster == rhs.cluster)) + return false; + if (!(location == rhs.location)) + return false; + if (!(primary == rhs.primary)) + return false; + if (!(dataSynced == rhs.dataSynced)) + return false; + if (!(parameters == rhs.parameters)) + return false; + return true; + } + bool operator != (const ClusterStorageDescriptor &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ClusterStorageDescriptor & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + typedef struct _Database__isset { _Database__isset() : name(false), description(false), locationUri(false), parameters(false), privileges(false) {} 
bool name; @@ -775,7 +921,7 @@ }; typedef struct _StorageDescriptor__isset { - _StorageDescriptor__isset() : cols(false), location(false), inputFormat(false), outputFormat(false), compressed(false), numBuckets(false), serdeInfo(false), bucketCols(false), sortCols(false), parameters(false) {} + _StorageDescriptor__isset() : cols(false), location(false), inputFormat(false), outputFormat(false), compressed(false), numBuckets(false), serdeInfo(false), bucketCols(false), sortCols(false), parameters(false), clusterStorage(false) {} bool cols; bool location; bool inputFormat; @@ -786,13 +932,14 @@ bool bucketCols; bool sortCols; bool parameters; + bool clusterStorage; } _StorageDescriptor__isset; class StorageDescriptor { public: - static const char* ascii_fingerprint; // = "11E4CE18F895C13812C853DFDCD1293F"; - static const uint8_t binary_fingerprint[16]; // = {0x11,0xE4,0xCE,0x18,0xF8,0x95,0xC1,0x38,0x12,0xC8,0x53,0xDF,0xDC,0xD1,0x29,0x3F}; + static const char* ascii_fingerprint; // = "6E2134C82A6349BD16FC74AAA1974949"; + static const uint8_t binary_fingerprint[16]; // = {0x6E,0x21,0x34,0xC8,0x2A,0x63,0x49,0xBD,0x16,0xFC,0x74,0xAA,0xA1,0x97,0x49,0x49}; StorageDescriptor() : location(""), inputFormat(""), outputFormat(""), compressed(0), numBuckets(0) { } @@ -809,6 +956,7 @@ std::vector bucketCols; std::vector sortCols; std::map parameters; + std::vector clusterStorage; _StorageDescriptor__isset __isset; @@ -852,6 +1000,11 @@ parameters = val; } + void __set_clusterStorage(const std::vector & val) { + clusterStorage = val; + __isset.clusterStorage = true; + } + bool operator == (const StorageDescriptor & rhs) const { if (!(cols == rhs.cols)) @@ -874,6 +1027,10 @@ return false; if (!(parameters == rhs.parameters)) return false; + if (__isset.clusterStorage != rhs.__isset.clusterStorage) + return false; + else if (__isset.clusterStorage && !(clusterStorage == rhs.clusterStorage)) + return false; return true; } bool operator != (const StorageDescriptor &rhs) const { @@ 
-907,8 +1064,8 @@ class Table { public: - static const char* ascii_fingerprint; // = "26BE788C09746068A2616712C9262900"; - static const uint8_t binary_fingerprint[16]; // = {0x26,0xBE,0x78,0x8C,0x09,0x74,0x60,0x68,0xA2,0x61,0x67,0x12,0xC9,0x26,0x29,0x00}; + static const char* ascii_fingerprint; // = "B95423545849411EAB96918BDFBF8479"; + static const uint8_t binary_fingerprint[16]; // = {0xB9,0x54,0x23,0x54,0x58,0x49,0x41,0x1E,0xAB,0x96,0x91,0x8B,0xDF,0xBF,0x84,0x79}; Table() : tableName(""), dbName(""), owner(""), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(""), viewExpandedText(""), tableType("") { } @@ -1042,8 +1199,8 @@ class Partition { public: - static const char* ascii_fingerprint; // = "F480E1D1B8AEBDDB37F8E180C0F07395"; - static const uint8_t binary_fingerprint[16]; // = {0xF4,0x80,0xE1,0xD1,0xB8,0xAE,0xBD,0xDB,0x37,0xF8,0xE1,0x80,0xC0,0xF0,0x73,0x95}; + static const char* ascii_fingerprint; // = "8108795CC9429AC2FE53E4749B040EF8"; + static const uint8_t binary_fingerprint[16]; // = {0x81,0x08,0x79,0x5C,0xC9,0x42,0x9A,0xC2,0xFE,0x53,0xE4,0x74,0x9B,0x04,0x0E,0xF8}; Partition() : dbName(""), tableName(""), createTime(0), lastAccessTime(0) { } @@ -1144,8 +1301,8 @@ class Index { public: - static const char* ascii_fingerprint; // = "5FEE4F7E28935B644F207D74459F6A29"; - static const uint8_t binary_fingerprint[16]; // = {0x5F,0xEE,0x4F,0x7E,0x28,0x93,0x5B,0x64,0x4F,0x20,0x7D,0x74,0x45,0x9F,0x6A,0x29}; + static const char* ascii_fingerprint; // = "45A49EB1C8E638AF0A68D3368EDB1E88"; + static const uint8_t binary_fingerprint[16]; // = {0x45,0xA4,0x9E,0xB1,0xC8,0xE6,0x38,0xAF,0x0A,0x68,0xD3,0x36,0x8E,0xDB,0x1E,0x88}; Index() : indexName(""), indexHandlerClass(""), dbName(""), origTableName(""), createTime(0), lastAccessTime(0), indexTableName(""), deferredRebuild(0) { } Index: metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp =================================================================== --- 
metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (revision 1235046) +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp (working copy) @@ -92,6 +92,11 @@ printf("drop_table\n"); } + void drop_table_on_cluster(const std::string& cluster_name, const std::string& dbname, const std::string& name, const bool deleteData) { + // Your implementation goes here + printf("drop_table_on_cluster\n"); + } + void get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { // Your implementation goes here printf("get_tables\n"); @@ -147,6 +152,11 @@ printf("drop_partition\n"); } + bool drop_partition_on_cluster(const std::string& cluster_name, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { + // Your implementation goes here + printf("drop_partition_on_cluster\n"); + } + bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { // Your implementation goes here printf("drop_partition_by_name\n"); @@ -272,6 +282,31 @@ printf("get_index_names\n"); } + void create_cluster(const Cluster& cluster) { + // Your implementation goes here + printf("create_cluster\n"); + } + + void drop_cluster(const std::string& cluster_name) { + // Your implementation goes here + printf("drop_cluster\n"); + } + + void get_cluster(Cluster& _return, const std::string& cluster_name) { + // Your implementation goes here + printf("get_cluster\n"); + } + + void list_clusters(std::vector & _return) { + // Your implementation goes here + printf("list_clusters\n"); + } + + void alter_cluster(const std::string& cluster_name, const Cluster& cluster) { + // Your implementation goes here + printf("alter_cluster\n"); + } + bool create_role(const Role& role) { // Your implementation goes here printf("create_role\n"); Index: metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb 
=================================================================== --- metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (revision 1235046) +++ metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (working copy) @@ -249,6 +249,23 @@ return end + def drop_table_on_cluster(cluster_name, dbname, name, deleteData) + send_drop_table_on_cluster(cluster_name, dbname, name, deleteData) + recv_drop_table_on_cluster() + end + + def send_drop_table_on_cluster(cluster_name, dbname, name, deleteData) + send_message('drop_table_on_cluster', Drop_table_on_cluster_args, :cluster_name => cluster_name, :dbname => dbname, :name => name, :deleteData => deleteData) + end + + def recv_drop_table_on_cluster() + result = receive_message(Drop_table_on_cluster_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + def get_tables(db_name, pattern) send_get_tables(db_name, pattern) return recv_get_tables() @@ -439,6 +456,24 @@ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition failed: unknown result') end + def drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData) + send_drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData) + return recv_drop_partition_on_cluster() + end + + def send_drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData) + send_message('drop_partition_on_cluster', Drop_partition_on_cluster_args, :cluster_name => cluster_name, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :deleteData => deleteData) + end + + def recv_drop_partition_on_cluster() + result = receive_message(Drop_partition_on_cluster_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition_on_cluster failed: unknown result') + end + def drop_partition_by_name(db_name, tbl_name, part_name, deleteData) send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData) return recv_drop_partition_by_name() @@ -864,6 +899,91 @@ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_index_names failed: unknown result') end + def create_cluster(cluster) + send_create_cluster(cluster) + recv_create_cluster() + end + + def send_create_cluster(cluster) + send_message('create_cluster', Create_cluster_args, :cluster => cluster) + end + + def recv_create_cluster() + result = receive_message(Create_cluster_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + + def drop_cluster(cluster_name) + send_drop_cluster(cluster_name) + recv_drop_cluster() + end + + def send_drop_cluster(cluster_name) + send_message('drop_cluster', Drop_cluster_args, :cluster_name => cluster_name) + end + + def recv_drop_cluster() + result = receive_message(Drop_cluster_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + + def get_cluster(cluster_name) + send_get_cluster(cluster_name) + return recv_get_cluster() + end + + def send_get_cluster(cluster_name) + send_message('get_cluster', Get_cluster_args, :cluster_name => cluster_name) + end + + def recv_get_cluster() + result = receive_message(Get_cluster_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_cluster failed: unknown result') + end + + def list_clusters() + send_list_clusters() + return recv_list_clusters() + end + + def send_list_clusters() + send_message('list_clusters', List_clusters_args) + end + + def recv_list_clusters() + result = receive_message(List_clusters_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'list_clusters failed: unknown result') + end + + def alter_cluster(cluster_name, cluster) + send_alter_cluster(cluster_name, cluster) + recv_alter_cluster() + end + + def send_alter_cluster(cluster_name, cluster) + send_message('alter_cluster', Alter_cluster_args, :cluster_name => cluster_name, :cluster => cluster) + end + + def recv_alter_cluster() + result = receive_message(Alter_cluster_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? 
+ return + end + def create_role(role) send_create_role(role) return recv_create_role() @@ -1282,6 +1402,21 @@ write_result(result, oprot, 'drop_table', seqid) end + def process_drop_table_on_cluster(seqid, iprot, oprot) + args = read_args(iprot, Drop_table_on_cluster_args) + result = Drop_table_on_cluster_result.new() + begin + @handler.drop_table_on_cluster(args.cluster_name, args.dbname, args.name, args.deleteData) + rescue NoSuchObjectException => o1 + result.o1 = o1 + rescue MetaException => o2 + result.o2 = o2 + rescue InvalidOperationException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_table_on_cluster', seqid) + end + def process_get_tables(seqid, iprot, oprot) args = read_args(iprot, Get_tables_args) result = Get_tables_result.new() @@ -1433,6 +1568,21 @@ write_result(result, oprot, 'drop_partition', seqid) end + def process_drop_partition_on_cluster(seqid, iprot, oprot) + args = read_args(iprot, Drop_partition_on_cluster_args) + result = Drop_partition_on_cluster_result.new() + begin + result.success = @handler.drop_partition_on_cluster(args.cluster_name, args.db_name, args.tbl_name, args.part_vals, args.deleteData) + rescue NoSuchObjectException => o1 + result.o1 = o1 + rescue MetaException => o2 + result.o2 = o2 + rescue InvalidOperationException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_partition_on_cluster', seqid) + end + def process_drop_partition_by_name(seqid, iprot, oprot) args = read_args(iprot, Drop_partition_by_name_args) result = Drop_partition_by_name_result.new() @@ -1766,6 +1916,77 @@ write_result(result, oprot, 'get_index_names', seqid) end + def process_create_cluster(seqid, iprot, oprot) + args = read_args(iprot, Create_cluster_args) + result = Create_cluster_result.new() + begin + @handler.create_cluster(args.cluster) + rescue AlreadyExistsException => o1 + result.o1 = o1 + rescue InvalidObjectException => o2 + result.o2 = o2 + rescue MetaException => o3 + result.o3 = o3 + end + 
write_result(result, oprot, 'create_cluster', seqid) + end + + def process_drop_cluster(seqid, iprot, oprot) + args = read_args(iprot, Drop_cluster_args) + result = Drop_cluster_result.new() + begin + @handler.drop_cluster(args.cluster_name) + rescue NoSuchObjectException => o1 + result.o1 = o1 + rescue InvalidOperationException => o2 + result.o2 = o2 + rescue MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_cluster', seqid) + end + + def process_get_cluster(seqid, iprot, oprot) + args = read_args(iprot, Get_cluster_args) + result = Get_cluster_result.new() + begin + result.success = @handler.get_cluster(args.cluster_name) + rescue MetaException => o1 + result.o1 = o1 + rescue NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_cluster', seqid) + end + + def process_list_clusters(seqid, iprot, oprot) + args = read_args(iprot, List_clusters_args) + result = List_clusters_result.new() + begin + result.success = @handler.list_clusters() + rescue MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'list_clusters', seqid) + end + + def process_alter_cluster(seqid, iprot, oprot) + args = read_args(iprot, Alter_cluster_args) + result = Alter_cluster_result.new() + begin + @handler.alter_cluster(args.cluster_name, args.cluster) + rescue MetaException => o1 + result.o1 = o1 + rescue InvalidObjectException => o2 + result.o2 = o2 + rescue NoSuchObjectException => o3 + result.o3 = o3 + rescue AlreadyExistsException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'alter_cluster', seqid) + end + def process_create_role(seqid, iprot, oprot) args = read_args(iprot, Create_role_args) result = Create_role_result.new() @@ -2439,6 +2660,48 @@ ::Thrift::Struct.generate_accessors self end + class Drop_table_on_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER_NAME = 1 + DBNAME = 2 + NAME = 3 + DELETEDATA = 4 + + FIELDS = { + CLUSTER_NAME => {:type => 
::Thrift::Types::STRING, :name => 'cluster_name'}, + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + DELETEDATA => {:type => ::Thrift::Types::BOOL, :name => 'deleteData'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_table_on_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => MetaException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => InvalidOperationException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Get_tables_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 @@ -2869,6 +3132,52 @@ ::Thrift::Struct.generate_accessors self end + class Drop_partition_on_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER_NAME = 1 + DB_NAME = 2 + TBL_NAME = 3 + PART_VALS = 4 + DELETEDATA = 5 + + FIELDS = { + CLUSTER_NAME => {:type => ::Thrift::Types::STRING, :name => 'cluster_name'}, + DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + PART_VALS => {:type => ::Thrift::Types::LIST, :name => 'part_vals', :element => {:type => ::Thrift::Types::STRING}}, + DELETEDATA => {:type => ::Thrift::Types::BOOL, :name => 'deleteData'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_partition_on_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}, + O1 => {:type => 
::Thrift::Types::STRUCT, :name => 'o1', :class => NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => MetaException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => InvalidOperationException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Drop_partition_by_name_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 @@ -3895,6 +4204,187 @@ ::Thrift::Struct.generate_accessors self end + class Create_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER = 1 + + FIELDS = { + CLUSTER => {:type => ::Thrift::Types::STRUCT, :name => 'cluster', :class => Cluster} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER_NAME = 1 + + FIELDS = { + CLUSTER_NAME => {:type => ::Thrift::Types::STRING, :name => 'cluster_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => InvalidOperationException}, + O3 => {:type => 
::Thrift::Types::STRUCT, :name => 'o3', :class => MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER_NAME = 1 + + FIELDS = { + CLUSTER_NAME => {:type => ::Thrift::Types::STRING, :name => 'cluster_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => Cluster}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class List_clusters_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class List_clusters_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => Cluster}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Alter_cluster_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER_NAME = 1 + CLUSTER = 2 + + FIELDS = { + CLUSTER_NAME => {:type => ::Thrift::Types::STRING, :name => 'cluster_name'}, + CLUSTER => {:type => ::Thrift::Types::STRUCT, :name => 'cluster', :class => Cluster} + } + + def struct_fields; FIELDS; end + + def validate + end 
+ + ::Thrift::Struct.generate_accessors self + end + + class Alter_cluster_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => NoSuchObjectException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => AlreadyExistsException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Create_role_args include ::Thrift::Struct, ::Thrift::Struct_Union ROLE = 1 Index: metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb =================================================================== --- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (revision 1235046) +++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb (working copy) @@ -226,6 +226,54 @@ ::Thrift::Struct.generate_accessors self end +class Cluster + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + COMMENT = 2 + CREATETIME = 3 + LOCATIONURI = 4 + PARAMETERS = 5 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + COMMENT => {:type => ::Thrift::Types::STRING, :name => 'comment'}, + CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'}, + LOCATIONURI => {:type => ::Thrift::Types::STRING, :name => 'locationUri'}, + PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class ClusterStorageDescriptor + include ::Thrift::Struct, ::Thrift::Struct_Union + CLUSTER = 1 + LOCATION = 2 + PRIMARY = 3 + DATASYNCED = 4 + PARAMETERS = 5 + + FIELDS = { + CLUSTER => 
{:type => ::Thrift::Types::STRUCT, :name => 'cluster', :class => Cluster}, + LOCATION => {:type => ::Thrift::Types::STRING, :name => 'location'}, + PRIMARY => {:type => ::Thrift::Types::BOOL, :name => 'primary'}, + DATASYNCED => {:type => ::Thrift::Types::BOOL, :name => 'dataSynced'}, + PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class Database include ::Thrift::Struct, ::Thrift::Struct_Union NAME = 1 @@ -300,6 +348,7 @@ BUCKETCOLS = 8 SORTCOLS = 9 PARAMETERS = 10 + CLUSTERSTORAGE = 11 FIELDS = { COLS => {:type => ::Thrift::Types::LIST, :name => 'cols', :element => {:type => ::Thrift::Types::STRUCT, :class => FieldSchema}}, @@ -311,7 +360,8 @@ SERDEINFO => {:type => ::Thrift::Types::STRUCT, :name => 'serdeInfo', :class => SerDeInfo}, BUCKETCOLS => {:type => ::Thrift::Types::LIST, :name => 'bucketCols', :element => {:type => ::Thrift::Types::STRING}}, SORTCOLS => {:type => ::Thrift::Types::LIST, :name => 'sortCols', :element => {:type => ::Thrift::Types::STRUCT, :class => Order}}, - PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}} + PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, + CLUSTERSTORAGE => {:type => ::Thrift::Types::LIST, :name => 'clusterStorage', :element => {:type => ::Thrift::Types::STRUCT, :class => ClusterStorageDescriptor}, :optional => true} } def struct_fields; FIELDS; end Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java =================================================================== --- 
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java (working copy) @@ -55,6 +55,8 @@ public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public void drop_table_on_cluster(String cluster_name, String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException; + public List get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException; public List get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException; @@ -77,6 +79,8 @@ public boolean drop_partition(String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public boolean drop_partition_on_cluster(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException; + public boolean drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; public Partition get_partition(String db_name, String tbl_name, List part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; @@ -127,6 +131,16 @@ public List get_index_names(String db_name, String tbl_name, short max_indexes) throws MetaException, org.apache.thrift.TException; + public void create_cluster(Cluster cluster) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException; + + public void drop_cluster(String cluster_name) throws NoSuchObjectException, InvalidOperationException, 
MetaException, org.apache.thrift.TException; + + public Cluster get_cluster(String cluster_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + + public List list_clusters() throws MetaException, org.apache.thrift.TException; + + public void alter_cluster(String cluster_name, Cluster cluster) throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException, org.apache.thrift.TException; + public boolean create_role(Role role) throws MetaException, org.apache.thrift.TException; public boolean drop_role(String role_name) throws MetaException, org.apache.thrift.TException; @@ -187,6 +201,8 @@ public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void drop_table_on_cluster(String cluster_name, String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -209,6 +225,8 @@ public void drop_partition(String db_name, String tbl_name, List part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void drop_partition_on_cluster(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_partition(String db_name, 
String tbl_name, List part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -259,6 +277,16 @@ public void get_index_names(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_cluster(Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void drop_cluster(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_cluster(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void list_clusters(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void alter_cluster(String cluster_name, Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_role(Role role, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void drop_role(String role_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -718,6 +746,38 @@ return; } + public void drop_table_on_cluster(String cluster_name, String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException + { + send_drop_table_on_cluster(cluster_name, dbname, name, deleteData); + recv_drop_table_on_cluster(); + } + + public void send_drop_table_on_cluster(String cluster_name, String dbname, String name, boolean deleteData) throws org.apache.thrift.TException + { + drop_table_on_cluster_args args = new drop_table_on_cluster_args(); + args.setCluster_name(cluster_name); + args.setDbname(dbname); + args.setName(name); + 
args.setDeleteData(deleteData); + sendBase("drop_table_on_cluster", args); + } + + public void recv_drop_table_on_cluster() throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException + { + drop_table_on_cluster_result result = new drop_table_on_cluster_result(); + receiveBase(result, "drop_table_on_cluster"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + public List get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException { send_get_tables(db_name, pattern); @@ -1060,6 +1120,42 @@ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_partition failed: unknown result"); } + public boolean drop_partition_on_cluster(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData) throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException + { + send_drop_partition_on_cluster(cluster_name, db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition_on_cluster(); + } + + public void send_drop_partition_on_cluster(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData) throws org.apache.thrift.TException + { + drop_partition_on_cluster_args args = new drop_partition_on_cluster_args(); + args.setCluster_name(cluster_name); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); + args.setDeleteData(deleteData); + sendBase("drop_partition_on_cluster", args); + } + + public boolean recv_drop_partition_on_cluster() throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException + { + drop_partition_on_cluster_result result = new drop_partition_on_cluster_result(); + receiveBase(result, "drop_partition_on_cluster"); + if (result.isSetSuccess()) 
{ + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "drop_partition_on_cluster failed: unknown result"); + } + public boolean drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); @@ -1845,6 +1941,151 @@ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_index_names failed: unknown result"); } + public void create_cluster(Cluster cluster) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_cluster(cluster); + recv_create_cluster(); + } + + public void send_create_cluster(Cluster cluster) throws org.apache.thrift.TException + { + create_cluster_args args = new create_cluster_args(); + args.setCluster(cluster); + sendBase("create_cluster", args); + } + + public void recv_create_cluster() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_cluster_result result = new create_cluster_result(); + receiveBase(result, "create_cluster"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + + public void drop_cluster(String cluster_name) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_drop_cluster(cluster_name); + recv_drop_cluster(); + } + + public void send_drop_cluster(String cluster_name) throws org.apache.thrift.TException + { + drop_cluster_args args = new drop_cluster_args(); + 
args.setCluster_name(cluster_name); + sendBase("drop_cluster", args); + } + + public void recv_drop_cluster() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + drop_cluster_result result = new drop_cluster_result(); + receiveBase(result, "drop_cluster"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + + public Cluster get_cluster(String cluster_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_cluster(cluster_name); + return recv_get_cluster(); + } + + public void send_get_cluster(String cluster_name) throws org.apache.thrift.TException + { + get_cluster_args args = new get_cluster_args(); + args.setCluster_name(cluster_name); + sendBase("get_cluster", args); + } + + public Cluster recv_get_cluster() throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + get_cluster_result result = new get_cluster_result(); + receiveBase(result, "get_cluster"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_cluster failed: unknown result"); + } + + public List list_clusters() throws MetaException, org.apache.thrift.TException + { + send_list_clusters(); + return recv_list_clusters(); + } + + public void send_list_clusters() throws org.apache.thrift.TException + { + list_clusters_args args = new list_clusters_args(); + sendBase("list_clusters", args); + } + + public List recv_list_clusters() throws MetaException, org.apache.thrift.TException + { + list_clusters_result result = new list_clusters_result(); + receiveBase(result, "list_clusters"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != 
null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "list_clusters failed: unknown result"); + } + + public void alter_cluster(String cluster_name, Cluster cluster) throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException, org.apache.thrift.TException + { + send_alter_cluster(cluster_name, cluster); + recv_alter_cluster(); + } + + public void send_alter_cluster(String cluster_name, Cluster cluster) throws org.apache.thrift.TException + { + alter_cluster_args args = new alter_cluster_args(); + args.setCluster_name(cluster_name); + args.setCluster(cluster); + sendBase("alter_cluster", args); + } + + public void recv_alter_cluster() throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException, org.apache.thrift.TException + { + alter_cluster_result result = new alter_cluster_result(); + receiveBase(result, "alter_cluster"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + return; + } + public boolean create_role(Role role) throws MetaException, org.apache.thrift.TException { send_create_role(role); @@ -2703,6 +2944,47 @@ } } + public void drop_table_on_cluster(String cluster_name, String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_table_on_cluster_call method_call = new drop_table_on_cluster_call(cluster_name, dbname, name, deleteData, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class drop_table_on_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private String cluster_name; + private String dbname; + private String name; + private 
boolean deleteData; + public drop_table_on_cluster_call(String cluster_name, String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster_name = cluster_name; + this.dbname = dbname; + this.name = name; + this.deleteData = deleteData; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_table_on_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_table_on_cluster_args args = new drop_table_on_cluster_args(); + args.setCluster_name(cluster_name); + args.setDbname(dbname); + args.setName(name); + args.setDeleteData(deleteData); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, MetaException, InvalidOperationException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_drop_table_on_cluster(); + } + } + public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_tables_call method_call = new get_tables_call(db_name, pattern, resultHandler, this, ___protocolFactory, ___transport); @@ -3097,6 +3379,50 @@ } } + public void 
drop_partition_on_cluster(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_partition_on_cluster_call method_call = new drop_partition_on_cluster_call(cluster_name, db_name, tbl_name, part_vals, deleteData, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class drop_partition_on_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private String cluster_name; + private String db_name; + private String tbl_name; + private List part_vals; + private boolean deleteData; + public drop_partition_on_cluster_call(String cluster_name, String db_name, String tbl_name, List part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster_name = cluster_name; + this.db_name = db_name; + this.tbl_name = tbl_name; + this.part_vals = part_vals; + this.deleteData = deleteData; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_partition_on_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_partition_on_cluster_args args = new drop_partition_on_cluster_args(); + args.setCluster_name(cluster_name); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + args.setPart_vals(part_vals); + args.setDeleteData(deleteData); + args.write(prot); + prot.writeMessageEnd(); + } + + public boolean getResult() throws NoSuchObjectException, MetaException, 
InvalidOperationException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_drop_partition_on_cluster(); + } + } + public void drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); drop_partition_by_name_call method_call = new drop_partition_by_name_call(db_name, tbl_name, part_name, deleteData, resultHandler, this, ___protocolFactory, ___transport); @@ -4077,6 +4403,166 @@ } } + public void create_cluster(Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + create_cluster_call method_call = new create_cluster_call(cluster, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class create_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private Cluster cluster; + public create_cluster_call(Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster = cluster; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("create_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + create_cluster_args args = new create_cluster_args(); + args.setCluster(cluster); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_create_cluster(); + } + } + + public void drop_cluster(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_cluster_call method_call = new drop_cluster_call(cluster_name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class drop_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private String cluster_name; + public drop_cluster_call(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster_name = cluster_name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_cluster_args args 
= new drop_cluster_args(); + args.setCluster_name(cluster_name); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_drop_cluster(); + } + } + + public void get_cluster(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_cluster_call method_call = new get_cluster_call(cluster_name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private String cluster_name; + public get_cluster_call(String cluster_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster_name = cluster_name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_cluster_args args = new get_cluster_args(); + args.setCluster_name(cluster_name); + args.write(prot); + prot.writeMessageEnd(); + } + + public 
Cluster getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_cluster(); + } + } + + public void list_clusters(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + list_clusters_call method_call = new list_clusters_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class list_clusters_call extends org.apache.thrift.async.TAsyncMethodCall { + public list_clusters_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("list_clusters", org.apache.thrift.protocol.TMessageType.CALL, 0)); + list_clusters_args args = new list_clusters_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_list_clusters(); + } + } + + public void alter_cluster(String cluster_name, Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + alter_cluster_call method_call = new alter_cluster_call(cluster_name, cluster, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class alter_cluster_call extends org.apache.thrift.async.TAsyncMethodCall { + private String cluster_name; + private Cluster cluster; + public alter_cluster_call(String cluster_name, Cluster cluster, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cluster_name = cluster_name; + this.cluster = cluster; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_cluster", org.apache.thrift.protocol.TMessageType.CALL, 0)); + alter_cluster_args args = new alter_cluster_args(); + args.setCluster_name(cluster_name); + args.setCluster(cluster); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, InvalidObjectException, NoSuchObjectException, AlreadyExistsException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + 
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_alter_cluster(); + } + } + public void create_role(Role role, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); create_role_call method_call = new create_role_call(role, resultHandler, this, ___protocolFactory, ___transport); @@ -4591,6 +5077,7 @@ processMap.put("get_schema", new get_schema()); processMap.put("create_table", new create_table()); processMap.put("drop_table", new drop_table()); + processMap.put("drop_table_on_cluster", new drop_table_on_cluster()); processMap.put("get_tables", new get_tables()); processMap.put("get_all_tables", new get_all_tables()); processMap.put("get_table", new get_table()); @@ -4602,6 +5089,7 @@ processMap.put("append_partition", new append_partition()); processMap.put("append_partition_by_name", new append_partition_by_name()); processMap.put("drop_partition", new drop_partition()); + processMap.put("drop_partition_on_cluster", new drop_partition_on_cluster()); processMap.put("drop_partition_by_name", new drop_partition_by_name()); processMap.put("get_partition", new get_partition()); processMap.put("get_partition_with_auth", new get_partition_with_auth()); @@ -4627,6 +5115,11 @@ processMap.put("get_index_by_name", new get_index_by_name()); processMap.put("get_indexes", new get_indexes()); processMap.put("get_index_names", new get_index_names()); + processMap.put("create_cluster", new create_cluster()); + processMap.put("drop_cluster", new drop_cluster()); + processMap.put("get_cluster", new get_cluster()); + processMap.put("list_clusters", new list_clusters()); + processMap.put("alter_cluster", new alter_cluster()); processMap.put("create_role", new create_role()); processMap.put("drop_role", 
new drop_role()); processMap.put("get_role_names", new get_role_names()); @@ -4962,6 +5455,30 @@ } } + private static class drop_table_on_cluster extends org.apache.thrift.ProcessFunction { + public drop_table_on_cluster() { + super("drop_table_on_cluster"); + } + + protected drop_table_on_cluster_args getEmptyArgsInstance() { + return new drop_table_on_cluster_args(); + } + + protected drop_table_on_cluster_result getResult(I iface, drop_table_on_cluster_args args) throws org.apache.thrift.TException { + drop_table_on_cluster_result result = new drop_table_on_cluster_result(); + try { + iface.drop_table_on_cluster(args.cluster_name, args.dbname, args.name, args.deleteData); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } catch (InvalidOperationException o3) { + result.o3 = o3; + } + return result; + } + } + private static class get_tables extends org.apache.thrift.ProcessFunction { public get_tables() { super("get_tables"); @@ -5214,6 +5731,31 @@ } } + private static class drop_partition_on_cluster extends org.apache.thrift.ProcessFunction { + public drop_partition_on_cluster() { + super("drop_partition_on_cluster"); + } + + protected drop_partition_on_cluster_args getEmptyArgsInstance() { + return new drop_partition_on_cluster_args(); + } + + protected drop_partition_on_cluster_result getResult(I iface, drop_partition_on_cluster_args args) throws org.apache.thrift.TException { + drop_partition_on_cluster_result result = new drop_partition_on_cluster_result(); + try { + result.success = iface.drop_partition_on_cluster(args.cluster_name, args.db_name, args.tbl_name, args.part_vals, args.deleteData); + result.setSuccessIsSet(true); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } catch (InvalidOperationException o3) { + result.o3 = o3; + } + return result; + } + } + private static class drop_partition_by_name extends 
org.apache.thrift.ProcessFunction { public drop_partition_by_name() { super("drop_partition_by_name"); @@ -5775,6 +6317,122 @@ } } + private static class create_cluster extends org.apache.thrift.ProcessFunction { + public create_cluster() { + super("create_cluster"); + } + + protected create_cluster_args getEmptyArgsInstance() { + return new create_cluster_args(); + } + + protected create_cluster_result getResult(I iface, create_cluster_args args) throws org.apache.thrift.TException { + create_cluster_result result = new create_cluster_result(); + try { + iface.create_cluster(args.cluster); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + private static class drop_cluster extends org.apache.thrift.ProcessFunction { + public drop_cluster() { + super("drop_cluster"); + } + + protected drop_cluster_args getEmptyArgsInstance() { + return new drop_cluster_args(); + } + + protected drop_cluster_result getResult(I iface, drop_cluster_args args) throws org.apache.thrift.TException { + drop_cluster_result result = new drop_cluster_result(); + try { + iface.drop_cluster(args.cluster_name); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + private static class get_cluster extends org.apache.thrift.ProcessFunction { + public get_cluster() { + super("get_cluster"); + } + + protected get_cluster_args getEmptyArgsInstance() { + return new get_cluster_args(); + } + + protected get_cluster_result getResult(I iface, get_cluster_args args) throws org.apache.thrift.TException { + get_cluster_result result = new get_cluster_result(); + try { + result.success = iface.get_cluster(args.cluster_name); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + 
result.o2 = o2; + } + return result; + } + } + + private static class list_clusters extends org.apache.thrift.ProcessFunction { + public list_clusters() { + super("list_clusters"); + } + + protected list_clusters_args getEmptyArgsInstance() { + return new list_clusters_args(); + } + + protected list_clusters_result getResult(I iface, list_clusters_args args) throws org.apache.thrift.TException { + list_clusters_result result = new list_clusters_result(); + try { + result.success = iface.list_clusters(); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + private static class alter_cluster extends org.apache.thrift.ProcessFunction { + public alter_cluster() { + super("alter_cluster"); + } + + protected alter_cluster_args getEmptyArgsInstance() { + return new alter_cluster_args(); + } + + protected alter_cluster_result getResult(I iface, alter_cluster_args args) throws org.apache.thrift.TException { + alter_cluster_result result = new alter_cluster_result(); + try { + iface.alter_cluster(args.cluster_name, args.cluster); + } catch (MetaException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (NoSuchObjectException o3) { + result.o3 = o3; + } catch (AlreadyExistsException o4) { + result.o4 = o4; + } + return result; + } + } + private static class create_role extends org.apache.thrift.ProcessFunction { public create_role() { super("create_role"); @@ -9109,7 +9767,7 @@ if (lastComparison != 0) { return lastComparison; } - } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -9140,13 +9798,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list98 = iprot.readListBegin(); - this.success = new ArrayList(_list98.size); - for (int _i99 = 0; _i99 < _list98.size; ++_i99) + org.apache.thrift.protocol.TList _list112 = iprot.readListBegin(); + 
this.success = new ArrayList(_list112.size); + for (int _i113 = 0; _i113 < _list112.size; ++_i113) { - String _elem100; // required - _elem100 = iprot.readString(); - this.success.add(_elem100); + String _elem114; // required + _elem114 = iprot.readString(); + this.success.add(_elem114); } iprot.readListEnd(); } @@ -9178,9 +9836,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter101 : this.success) + for (String _iter115 : this.success) { - oprot.writeString(_iter101); + oprot.writeString(_iter115); } oprot.writeListEnd(); } @@ -9758,13 +10416,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list102 = iprot.readListBegin(); - this.success = new ArrayList(_list102.size); - for (int _i103 = 0; _i103 < _list102.size; ++_i103) + org.apache.thrift.protocol.TList _list116 = iprot.readListBegin(); + this.success = new ArrayList(_list116.size); + for (int _i117 = 0; _i117 < _list116.size; ++_i117) { - String _elem104; // required - _elem104 = iprot.readString(); - this.success.add(_elem104); + String _elem118; // required + _elem118 = iprot.readString(); + this.success.add(_elem118); } iprot.readListEnd(); } @@ -9796,9 +10454,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter105 : this.success) + for (String _iter119 : this.success) { - oprot.writeString(_iter105); + oprot.writeString(_iter119); } oprot.writeListEnd(); } @@ -13619,16 +14277,16 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map106 = iprot.readMapBegin(); - this.success = new HashMap(2*_map106.size); - for (int _i107 = 0; _i107 < _map106.size; ++_i107) + org.apache.thrift.protocol.TMap 
_map120 = iprot.readMapBegin(); + this.success = new HashMap(2*_map120.size); + for (int _i121 = 0; _i121 < _map120.size; ++_i121) { - String _key108; // required - Type _val109; // required - _key108 = iprot.readString(); - _val109 = new Type(); - _val109.read(iprot); - this.success.put(_key108, _val109); + String _key122; // required + Type _val123; // required + _key122 = iprot.readString(); + _val123 = new Type(); + _val123.read(iprot); + this.success.put(_key122, _val123); } iprot.readMapEnd(); } @@ -13660,10 +14318,10 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Map.Entry _iter110 : this.success.entrySet()) + for (Map.Entry _iter124 : this.success.entrySet()) { - oprot.writeString(_iter110.getKey()); - _iter110.getValue().write(oprot); + oprot.writeString(_iter124.getKey()); + _iter124.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -14559,14 +15217,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list111 = iprot.readListBegin(); - this.success = new ArrayList(_list111.size); - for (int _i112 = 0; _i112 < _list111.size; ++_i112) + org.apache.thrift.protocol.TList _list125 = iprot.readListBegin(); + this.success = new ArrayList(_list125.size); + for (int _i126 = 0; _i126 < _list125.size; ++_i126) { - FieldSchema _elem113; // required - _elem113 = new FieldSchema(); - _elem113.read(iprot); - this.success.add(_elem113); + FieldSchema _elem127; // required + _elem127 = new FieldSchema(); + _elem127.read(iprot); + this.success.add(_elem127); } iprot.readListEnd(); } @@ -14614,9 +15272,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (FieldSchema _iter114 : this.success) + for (FieldSchema 
_iter128 : this.success) { - _iter114.write(oprot); + _iter128.write(oprot); } oprot.writeListEnd(); } @@ -15536,14 +16194,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list115 = iprot.readListBegin(); - this.success = new ArrayList(_list115.size); - for (int _i116 = 0; _i116 < _list115.size; ++_i116) + org.apache.thrift.protocol.TList _list129 = iprot.readListBegin(); + this.success = new ArrayList(_list129.size); + for (int _i130 = 0; _i130 < _list129.size; ++_i130) { - FieldSchema _elem117; // required - _elem117 = new FieldSchema(); - _elem117.read(iprot); - this.success.add(_elem117); + FieldSchema _elem131; // required + _elem131 = new FieldSchema(); + _elem131.read(iprot); + this.success.add(_elem131); } iprot.readListEnd(); } @@ -15591,9 +16249,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (FieldSchema _iter118 : this.success) + for (FieldSchema _iter132 : this.success) { - _iter118.write(oprot); + _iter132.write(oprot); } oprot.writeListEnd(); } @@ -17382,6 +18040,1034 @@ } + public static class drop_table_on_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_on_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, 
(short)3); + private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4); + + private String cluster_name; // required + private String dbname; // required + private String name; // required + private boolean deleteData; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER_NAME((short)1, "cluster_name"), + DBNAME((short)2, "dbname"), + NAME((short)3, "name"), + DELETE_DATA((short)4, "deleteData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER_NAME + return CLUSTER_NAME; + case 2: // DBNAME + return DBNAME; + case 3: // NAME + return NAME; + case 4: // DELETE_DATA + return DELETE_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __DELETEDATA_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER_NAME, new org.apache.thrift.meta_data.FieldMetaData("cluster_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_on_cluster_args.class, metaDataMap); + } + + public drop_table_on_cluster_args() { + } + + public drop_table_on_cluster_args( + String cluster_name, + String 
dbname, + String name, + boolean deleteData) + { + this(); + this.cluster_name = cluster_name; + this.dbname = dbname; + this.name = name; + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public drop_table_on_cluster_args(drop_table_on_cluster_args other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + if (other.isSetCluster_name()) { + this.cluster_name = other.cluster_name; + } + if (other.isSetDbname()) { + this.dbname = other.dbname; + } + if (other.isSetName()) { + this.name = other.name; + } + this.deleteData = other.deleteData; + } + + public drop_table_on_cluster_args deepCopy() { + return new drop_table_on_cluster_args(this); + } + + @Override + public void clear() { + this.cluster_name = null; + this.dbname = null; + this.name = null; + setDeleteDataIsSet(false); + this.deleteData = false; + } + + public String getCluster_name() { + return this.cluster_name; + } + + public void setCluster_name(String cluster_name) { + this.cluster_name = cluster_name; + } + + public void unsetCluster_name() { + this.cluster_name = null; + } + + /** Returns true if field cluster_name is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster_name() { + return this.cluster_name != null; + } + + public void setCluster_nameIsSet(boolean value) { + if (!value) { + this.cluster_name = null; + } + } + + public String getDbname() { + return this.dbname; + } + + public void setDbname(String dbname) { + this.dbname = dbname; + } + + public void unsetDbname() { + this.dbname = null; + } + + /** Returns true if field dbname is set (has been assigned a value) and false otherwise */ + public boolean isSetDbname() { + return this.dbname != null; + } + + public void setDbnameIsSet(boolean value) { + if (!value) { + this.dbname = null; + } + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + 
+ public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + public void unsetDeleteData() { + __isset_bit_vector.clear(__DELETEDATA_ISSET_ID); + } + + /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ + public boolean isSetDeleteData() { + return __isset_bit_vector.get(__DELETEDATA_ISSET_ID); + } + + public void setDeleteDataIsSet(boolean value) { + __isset_bit_vector.set(__DELETEDATA_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER_NAME: + if (value == null) { + unsetCluster_name(); + } else { + setCluster_name((String)value); + } + break; + + case DBNAME: + if (value == null) { + unsetDbname(); + } else { + setDbname((String)value); + } + break; + + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case DELETE_DATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER_NAME: + return getCluster_name(); + + case DBNAME: + return getDbname(); + + case NAME: + return getName(); + + case DELETE_DATA: + return Boolean.valueOf(isDeleteData()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER_NAME: + 
return isSetCluster_name(); + case DBNAME: + return isSetDbname(); + case NAME: + return isSetName(); + case DELETE_DATA: + return isSetDeleteData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_table_on_cluster_args) + return this.equals((drop_table_on_cluster_args)that); + return false; + } + + public boolean equals(drop_table_on_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster_name = true && this.isSetCluster_name(); + boolean that_present_cluster_name = true && that.isSetCluster_name(); + if (this_present_cluster_name || that_present_cluster_name) { + if (!(this_present_cluster_name && that_present_cluster_name)) + return false; + if (!this.cluster_name.equals(that.cluster_name)) + return false; + } + + boolean this_present_dbname = true && this.isSetDbname(); + boolean that_present_dbname = true && that.isSetDbname(); + if (this_present_dbname || that_present_dbname) { + if (!(this_present_dbname && that_present_dbname)) + return false; + if (!this.dbname.equals(that.dbname)) + return false; + } + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_table_on_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + 
+ int lastComparison = 0; + drop_table_on_cluster_args typedOther = (drop_table_on_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster_name()).compareTo(typedOther.isSetCluster_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster_name, typedOther.cluster_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDbname()).compareTo(typedOther.isSetDbname()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbname()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, typedOther.dbname); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetName()).compareTo(typedOther.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, typedOther.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(typedOther.isSetDeleteData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDeleteData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, typedOther.deleteData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.cluster_name = iprot.readString(); + } 
else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // DBNAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.dbname = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // DELETE_DATA + if (field.type == org.apache.thrift.protocol.TType.BOOL) { + this.deleteData = iprot.readBool(); + setDeleteDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster_name != null) { + oprot.writeFieldBegin(CLUSTER_NAME_FIELD_DESC); + oprot.writeString(this.cluster_name); + oprot.writeFieldEnd(); + } + if (this.dbname != null) { + oprot.writeFieldBegin(DBNAME_FIELD_DESC); + oprot.writeString(this.dbname); + oprot.writeFieldEnd(); + } + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(this.deleteData); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_table_on_cluster_args("); + boolean first = true; + + sb.append("cluster_name:"); + if (this.cluster_name == null) { + sb.append("null"); + } else { + sb.append(this.cluster_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("dbname:"); + if 
(this.dbname == null) { + sb.append("null"); + } else { + sb.append(this.dbname); + } + first = false; + if (!first) sb.append(", "); + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class drop_table_on_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_on_cluster_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private NoSuchObjectException o1; // required + private MetaException o2; // required + private InvalidOperationException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_on_cluster_result.class, metaDataMap); + } + + public drop_table_on_cluster_result() { + } + + public drop_table_on_cluster_result( + NoSuchObjectException o1, + MetaException o2, + InvalidOperationException o3) + { + this(); 
+ this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public drop_table_on_cluster_result(drop_table_on_cluster_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new InvalidOperationException(other.o3); + } + } + + public drop_table_on_cluster_result deepCopy() { + return new drop_table_on_cluster_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public InvalidOperationException getO3() { + return this.o3; + } + + public void setO3(InvalidOperationException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } 
else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((InvalidOperationException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_table_on_cluster_result) + return this.equals((drop_table_on_cluster_result)that); + return false; + } + + public boolean equals(drop_table_on_cluster_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + 
} + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_table_on_cluster_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + drop_table_on_cluster_result typedOther = (drop_table_on_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { 
+ this.o2 = new MetaException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // O3 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o3 = new InvalidOperationException(); + this.o3.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_table_on_cluster_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + public static class get_tables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_args"); @@ -18082,13 +19768,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list119 = iprot.readListBegin(); - this.success = new ArrayList(_list119.size); - for (int _i120 = 0; _i120 < _list119.size; ++_i120) + org.apache.thrift.protocol.TList _list133 = iprot.readListBegin(); + this.success = new ArrayList(_list133.size); + for (int _i134 = 0; _i134 < _list133.size; ++_i134) { - String _elem121; // required - _elem121 = iprot.readString(); - this.success.add(_elem121); + String _elem135; // required + _elem135 = iprot.readString(); + this.success.add(_elem135); } iprot.readListEnd(); } @@ -18120,9 +19806,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter122 : this.success) + for (String _iter136 : this.success) { - oprot.writeString(_iter122); + oprot.writeString(_iter136); } oprot.writeListEnd(); } @@ -18794,13 +20480,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list123 = iprot.readListBegin(); - this.success = new ArrayList(_list123.size); - for (int _i124 = 0; _i124 < _list123.size; 
++_i124) + org.apache.thrift.protocol.TList _list137 = iprot.readListBegin(); + this.success = new ArrayList(_list137.size); + for (int _i138 = 0; _i138 < _list137.size; ++_i138) { - String _elem125; // required - _elem125 = iprot.readString(); - this.success.add(_elem125); + String _elem139; // required + _elem139 = iprot.readString(); + this.success.add(_elem139); } iprot.readListEnd(); } @@ -18832,9 +20518,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter126 : this.success) + for (String _iter140 : this.success) { - oprot.writeString(_iter126); + oprot.writeString(_iter140); } oprot.writeListEnd(); } @@ -20071,13 +21757,13 @@ case 2: // TBL_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list127 = iprot.readListBegin(); - this.tbl_names = new ArrayList(_list127.size); - for (int _i128 = 0; _i128 < _list127.size; ++_i128) + org.apache.thrift.protocol.TList _list141 = iprot.readListBegin(); + this.tbl_names = new ArrayList(_list141.size); + for (int _i142 = 0; _i142 < _list141.size; ++_i142) { - String _elem129; // required - _elem129 = iprot.readString(); - this.tbl_names.add(_elem129); + String _elem143; // required + _elem143 = iprot.readString(); + this.tbl_names.add(_elem143); } iprot.readListEnd(); } @@ -20107,9 +21793,9 @@ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.tbl_names.size())); - for (String _iter130 : this.tbl_names) + for (String _iter144 : this.tbl_names) { - oprot.writeString(_iter130); + oprot.writeString(_iter144); } oprot.writeListEnd(); } @@ -20619,14 +22305,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list131 = iprot.readListBegin(); - this.success = new ArrayList
(_list131.size); - for (int _i132 = 0; _i132 < _list131.size; ++_i132) + org.apache.thrift.protocol.TList _list145 = iprot.readListBegin(); + this.success = new ArrayList
(_list145.size); + for (int _i146 = 0; _i146 < _list145.size; ++_i146) { - Table _elem133; // required - _elem133 = new Table(); - _elem133.read(iprot); - this.success.add(_elem133); + Table _elem147; // required + _elem147 = new Table(); + _elem147.read(iprot); + this.success.add(_elem147); } iprot.readListEnd(); } @@ -20674,9 +22360,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Table _iter134 : this.success) + for (Table _iter148 : this.success) { - _iter134.write(oprot); + _iter148.write(oprot); } oprot.writeListEnd(); } @@ -21686,13 +23372,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list135 = iprot.readListBegin(); - this.success = new ArrayList(_list135.size); - for (int _i136 = 0; _i136 < _list135.size; ++_i136) + org.apache.thrift.protocol.TList _list149 = iprot.readListBegin(); + this.success = new ArrayList(_list149.size); + for (int _i150 = 0; _i150 < _list149.size; ++_i150) { - String _elem137; // required - _elem137 = iprot.readString(); - this.success.add(_elem137); + String _elem151; // required + _elem151 = iprot.readString(); + this.success.add(_elem151); } iprot.readListEnd(); } @@ -21740,9 +23426,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter138 : this.success) + for (String _iter152 : this.success) { - oprot.writeString(_iter138); + oprot.writeString(_iter152); } oprot.writeListEnd(); } @@ -23782,14 +25468,14 @@ case 1: // NEW_PARTS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list139 = iprot.readListBegin(); - this.new_parts = new ArrayList(_list139.size); - for (int _i140 = 0; _i140 < _list139.size; ++_i140) + org.apache.thrift.protocol.TList 
_list153 = iprot.readListBegin(); + this.new_parts = new ArrayList(_list153.size); + for (int _i154 = 0; _i154 < _list153.size; ++_i154) { - Partition _elem141; // required - _elem141 = new Partition(); - _elem141.read(iprot); - this.new_parts.add(_elem141); + Partition _elem155; // required + _elem155 = new Partition(); + _elem155.read(iprot); + this.new_parts.add(_elem155); } iprot.readListEnd(); } @@ -23814,9 +25500,9 @@ oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.new_parts.size())); - for (Partition _iter142 : this.new_parts) + for (Partition _iter156 : this.new_parts) { - _iter142.write(oprot); + _iter156.write(oprot); } oprot.writeListEnd(); } @@ -24821,13 +26507,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list143 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list143.size); - for (int _i144 = 0; _i144 < _list143.size; ++_i144) + org.apache.thrift.protocol.TList _list157 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list157.size); + for (int _i158 = 0; _i158 < _list157.size; ++_i158) { - String _elem145; // required - _elem145 = iprot.readString(); - this.part_vals.add(_elem145); + String _elem159; // required + _elem159 = iprot.readString(); + this.part_vals.add(_elem159); } iprot.readListEnd(); } @@ -24862,9 +26548,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter146 : this.part_vals) + for (String _iter160 : this.part_vals) { - oprot.writeString(_iter146); + oprot.writeString(_iter160); } oprot.writeListEnd(); } @@ -26985,13 +28671,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list147 = iprot.readListBegin(); - this.part_vals = 
new ArrayList(_list147.size); - for (int _i148 = 0; _i148 < _list147.size; ++_i148) + org.apache.thrift.protocol.TList _list161 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list161.size); + for (int _i162 = 0; _i162 < _list161.size; ++_i162) { - String _elem149; // required - _elem149 = iprot.readString(); - this.part_vals.add(_elem149); + String _elem163; // required + _elem163 = iprot.readString(); + this.part_vals.add(_elem163); } iprot.readListEnd(); } @@ -27034,9 +28720,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter150 : this.part_vals) + for (String _iter164 : this.part_vals) { - oprot.writeString(_iter150); + oprot.writeString(_iter164); } oprot.writeListEnd(); } @@ -27578,6 +29264,1246 @@ } + public static class drop_partition_on_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_partition_on_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)4); + private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new 
org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)5); + + private String cluster_name; // required + private String db_name; // required + private String tbl_name; // required + private List part_vals; // required + private boolean deleteData; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER_NAME((short)1, "cluster_name"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"), + PART_VALS((short)4, "part_vals"), + DELETE_DATA((short)5, "deleteData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER_NAME + return CLUSTER_NAME; + case 2: // DB_NAME + return DB_NAME; + case 3: // TBL_NAME + return TBL_NAME; + case 4: // PART_VALS + return PART_VALS; + case 5: // DELETE_DATA + return DELETE_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __DELETEDATA_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER_NAME, new org.apache.thrift.meta_data.FieldMetaData("cluster_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_partition_on_cluster_args.class, metaDataMap); + } + + public drop_partition_on_cluster_args() { + } + + public drop_partition_on_cluster_args( + String cluster_name, + String db_name, + String tbl_name, + List part_vals, + boolean deleteData) + { + this(); + this.cluster_name = cluster_name; + this.db_name = db_name; + this.tbl_name = tbl_name; + this.part_vals = part_vals; + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public drop_partition_on_cluster_args(drop_partition_on_cluster_args other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + if (other.isSetCluster_name()) { + this.cluster_name = other.cluster_name; + } + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(); + for (String other_element : other.part_vals) { + __this__part_vals.add(other_element); + } + this.part_vals = __this__part_vals; + } + this.deleteData = other.deleteData; + } + + public drop_partition_on_cluster_args deepCopy() { + return new drop_partition_on_cluster_args(this); + } + + @Override + public void clear() { + this.cluster_name = null; + this.db_name = null; + this.tbl_name = null; + this.part_vals = null; + setDeleteDataIsSet(false); + this.deleteData = false; + } + + public String getCluster_name() { + return this.cluster_name; + } + + public void setCluster_name(String cluster_name) { + this.cluster_name = cluster_name; + } + + public void unsetCluster_name() { + this.cluster_name = null; + } + + /** Returns true if field cluster_name is set (has been assigned a value) and false otherwise */ + public boolean 
isSetCluster_name() { + return this.cluster_name != null; + } + + public void setCluster_nameIsSet(boolean value) { + if (!value) { + this.cluster_name = null; + } + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? 
null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public void setPart_valsIsSet(boolean value) { + if (!value) { + this.part_vals = null; + } + } + + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + public void unsetDeleteData() { + __isset_bit_vector.clear(__DELETEDATA_ISSET_ID); + } + + /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ + public boolean isSetDeleteData() { + return __isset_bit_vector.get(__DELETEDATA_ISSET_ID); + } + + public void setDeleteDataIsSet(boolean value) { + __isset_bit_vector.set(__DELETEDATA_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER_NAME: + if (value == null) { + unsetCluster_name(); + } else { + setCluster_name((String)value); + } + break; + + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + + case DELETE_DATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + 
case CLUSTER_NAME: + return getCluster_name(); + + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case PART_VALS: + return getPart_vals(); + + case DELETE_DATA: + return Boolean.valueOf(isDeleteData()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER_NAME: + return isSetCluster_name(); + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); + case DELETE_DATA: + return isSetDeleteData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_partition_on_cluster_args) + return this.equals((drop_partition_on_cluster_args)that); + return false; + } + + public boolean equals(drop_partition_on_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster_name = true && this.isSetCluster_name(); + boolean that_present_cluster_name = true && that.isSetCluster_name(); + if (this_present_cluster_name || that_present_cluster_name) { + if (!(this_present_cluster_name && that_present_cluster_name)) + return false; + if (!this.cluster_name.equals(that.cluster_name)) + return false; + } + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if 
(!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_partition_on_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + drop_partition_on_cluster_args typedOther = (drop_partition_on_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster_name()).compareTo(typedOther.isSetCluster_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster_name, typedOther.cluster_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(typedOther.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, typedOther.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(typedOther.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, typedOther.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(typedOther.isSetPart_vals()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPart_vals()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, typedOther.part_vals); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(typedOther.isSetDeleteData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDeleteData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, typedOther.deleteData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.cluster_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // DB_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.db_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // TBL_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.tbl_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // PART_VALS + if (field.type == org.apache.thrift.protocol.TType.LIST) { + { + 
org.apache.thrift.protocol.TList _list165 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list165.size); + for (int _i166 = 0; _i166 < _list165.size; ++_i166) + { + String _elem167; // required + _elem167 = iprot.readString(); + this.part_vals.add(_elem167); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 5: // DELETE_DATA + if (field.type == org.apache.thrift.protocol.TType.BOOL) { + this.deleteData = iprot.readBool(); + setDeleteDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster_name != null) { + oprot.writeFieldBegin(CLUSTER_NAME_FIELD_DESC); + oprot.writeString(this.cluster_name); + oprot.writeFieldEnd(); + } + if (this.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(this.db_name); + oprot.writeFieldEnd(); + } + if (this.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(this.tbl_name); + oprot.writeFieldEnd(); + } + if (this.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); + for (String _iter168 : this.part_vals) + { + oprot.writeString(_iter168); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(this.deleteData); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("drop_partition_on_cluster_args("); + boolean first = true; + + sb.append("cluster_name:"); + if (this.cluster_name == null) { + sb.append("null"); + } else { + sb.append(this.cluster_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class drop_partition_on_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_partition_on_cluster_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private boolean success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + private InvalidOperationException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", 
org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_partition_on_cluster_result.class, metaDataMap); + } + + public drop_partition_on_cluster_result() { + } + + public drop_partition_on_cluster_result( + boolean success, + NoSuchObjectException o1, + MetaException o2, + InvalidOperationException o3) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public drop_partition_on_cluster_result(drop_partition_on_cluster_result other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new InvalidOperationException(other.o3); + } + } + + public drop_partition_on_cluster_result deepCopy() { + return new drop_partition_on_cluster_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public boolean isSuccess() { + return this.success; + } + + public void setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + } + + public void unsetSuccess() { + __isset_bit_vector.clear(__SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return 
__isset_bit_vector.get(__SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bit_vector.set(__SUCCESS_ISSET_ID, value); + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public InvalidOperationException getO3() { + return this.o3; + } + + public void setO3(InvalidOperationException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((InvalidOperationException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch 
(field) { + case SUCCESS: + return Boolean.valueOf(isSuccess()); + + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_partition_on_cluster_result) + return this.equals((drop_partition_on_cluster_result)that); + return false; + } + + public boolean equals(drop_partition_on_cluster_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return 
false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_partition_on_cluster_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + drop_partition_on_cluster_result typedOther = (drop_partition_on_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == 
org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 0: // SUCCESS + if (field.type == org.apache.thrift.protocol.TType.BOOL) { + this.success = iprot.readBool(); + setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o2 = new MetaException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // O3 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o3 = new InvalidOperationException(); + this.o3.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(this.success); + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_partition_on_cluster_result("); + boolean first = 
true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + public static class drop_partition_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_partition_by_name_args"); @@ -29005,13 +31931,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list151 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list151.size); - for (int _i152 = 0; _i152 < _list151.size; ++_i152) + org.apache.thrift.protocol.TList _list169 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list169.size); + for (int _i170 = 0; _i170 < 
_list169.size; ++_i170) { - String _elem153; // required - _elem153 = iprot.readString(); - this.part_vals.add(_elem153); + String _elem171; // required + _elem171 = iprot.readString(); + this.part_vals.add(_elem171); } iprot.readListEnd(); } @@ -29046,9 +31972,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter154 : this.part_vals) + for (String _iter172 : this.part_vals) { - oprot.writeString(_iter154); + oprot.writeString(_iter172); } oprot.writeListEnd(); } @@ -30138,13 +33064,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list155 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list155.size); - for (int _i156 = 0; _i156 < _list155.size; ++_i156) + org.apache.thrift.protocol.TList _list173 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list173.size); + for (int _i174 = 0; _i174 < _list173.size; ++_i174) { - String _elem157; // required - _elem157 = iprot.readString(); - this.part_vals.add(_elem157); + String _elem175; // required + _elem175 = iprot.readString(); + this.part_vals.add(_elem175); } iprot.readListEnd(); } @@ -30162,13 +33088,13 @@ case 5: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list158 = iprot.readListBegin(); - this.group_names = new ArrayList(_list158.size); - for (int _i159 = 0; _i159 < _list158.size; ++_i159) + org.apache.thrift.protocol.TList _list176 = iprot.readListBegin(); + this.group_names = new ArrayList(_list176.size); + for (int _i177 = 0; _i177 < _list176.size; ++_i177) { - String _elem160; // required - _elem160 = iprot.readString(); - this.group_names.add(_elem160); + String _elem178; // required + _elem178 = iprot.readString(); + this.group_names.add(_elem178); } iprot.readListEnd(); } @@ -30203,9 +33129,9 @@ 
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter161 : this.part_vals) + for (String _iter179 : this.part_vals) { - oprot.writeString(_iter161); + oprot.writeString(_iter179); } oprot.writeListEnd(); } @@ -30220,9 +33146,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter162 : this.group_names) + for (String _iter180 : this.group_names) { - oprot.writeString(_iter162); + oprot.writeString(_iter180); } oprot.writeListEnd(); } @@ -32570,14 +35496,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list163 = iprot.readListBegin(); - this.success = new ArrayList(_list163.size); - for (int _i164 = 0; _i164 < _list163.size; ++_i164) + org.apache.thrift.protocol.TList _list181 = iprot.readListBegin(); + this.success = new ArrayList(_list181.size); + for (int _i182 = 0; _i182 < _list181.size; ++_i182) { - Partition _elem165; // required - _elem165 = new Partition(); - _elem165.read(iprot); - this.success.add(_elem165); + Partition _elem183; // required + _elem183 = new Partition(); + _elem183.read(iprot); + this.success.add(_elem183); } iprot.readListEnd(); } @@ -32617,9 +35543,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter166 : this.success) + for (Partition _iter184 : this.success) { - _iter166.write(oprot); + _iter184.write(oprot); } oprot.writeListEnd(); } @@ -33247,13 +36173,13 @@ case 5: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list167 = iprot.readListBegin(); - this.group_names = new 
ArrayList(_list167.size); - for (int _i168 = 0; _i168 < _list167.size; ++_i168) + org.apache.thrift.protocol.TList _list185 = iprot.readListBegin(); + this.group_names = new ArrayList(_list185.size); + for (int _i186 = 0; _i186 < _list185.size; ++_i186) { - String _elem169; // required - _elem169 = iprot.readString(); - this.group_names.add(_elem169); + String _elem187; // required + _elem187 = iprot.readString(); + this.group_names.add(_elem187); } iprot.readListEnd(); } @@ -33296,9 +36222,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter170 : this.group_names) + for (String _iter188 : this.group_names) { - oprot.writeString(_iter170); + oprot.writeString(_iter188); } oprot.writeListEnd(); } @@ -33762,14 +36688,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list171 = iprot.readListBegin(); - this.success = new ArrayList(_list171.size); - for (int _i172 = 0; _i172 < _list171.size; ++_i172) + org.apache.thrift.protocol.TList _list189 = iprot.readListBegin(); + this.success = new ArrayList(_list189.size); + for (int _i190 = 0; _i190 < _list189.size; ++_i190) { - Partition _elem173; // required - _elem173 = new Partition(); - _elem173.read(iprot); - this.success.add(_elem173); + Partition _elem191; // required + _elem191 = new Partition(); + _elem191.read(iprot); + this.success.add(_elem191); } iprot.readListEnd(); } @@ -33809,9 +36735,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter174 : this.success) + for (Partition _iter192 : this.success) { - _iter174.write(oprot); + _iter192.write(oprot); } oprot.writeListEnd(); } @@ -34673,13 +37599,13 @@ case 0: // SUCCESS if (field.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list175 = iprot.readListBegin(); - this.success = new ArrayList(_list175.size); - for (int _i176 = 0; _i176 < _list175.size; ++_i176) + org.apache.thrift.protocol.TList _list193 = iprot.readListBegin(); + this.success = new ArrayList(_list193.size); + for (int _i194 = 0; _i194 < _list193.size; ++_i194) { - String _elem177; // required - _elem177 = iprot.readString(); - this.success.add(_elem177); + String _elem195; // required + _elem195 = iprot.readString(); + this.success.add(_elem195); } iprot.readListEnd(); } @@ -34711,9 +37637,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter178 : this.success) + for (String _iter196 : this.success) { - oprot.writeString(_iter178); + oprot.writeString(_iter196); } oprot.writeListEnd(); } @@ -35246,13 +38172,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list179 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list179.size); - for (int _i180 = 0; _i180 < _list179.size; ++_i180) + org.apache.thrift.protocol.TList _list197 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list197.size); + for (int _i198 = 0; _i198 < _list197.size; ++_i198) { - String _elem181; // required - _elem181 = iprot.readString(); - this.part_vals.add(_elem181); + String _elem199; // required + _elem199 = iprot.readString(); + this.part_vals.add(_elem199); } iprot.readListEnd(); } @@ -35295,9 +38221,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter182 : this.part_vals) + for (String _iter200 : this.part_vals) { - oprot.writeString(_iter182); + oprot.writeString(_iter200); } oprot.writeListEnd(); } @@ 
-35756,14 +38682,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list183 = iprot.readListBegin(); - this.success = new ArrayList(_list183.size); - for (int _i184 = 0; _i184 < _list183.size; ++_i184) + org.apache.thrift.protocol.TList _list201 = iprot.readListBegin(); + this.success = new ArrayList(_list201.size); + for (int _i202 = 0; _i202 < _list201.size; ++_i202) { - Partition _elem185; // required - _elem185 = new Partition(); - _elem185.read(iprot); - this.success.add(_elem185); + Partition _elem203; // required + _elem203 = new Partition(); + _elem203.read(iprot); + this.success.add(_elem203); } iprot.readListEnd(); } @@ -35803,9 +38729,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter186 : this.success) + for (Partition _iter204 : this.success) { - _iter186.write(oprot); + _iter204.write(oprot); } oprot.writeListEnd(); } @@ -36506,13 +39432,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list187 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list187.size); - for (int _i188 = 0; _i188 < _list187.size; ++_i188) + org.apache.thrift.protocol.TList _list205 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list205.size); + for (int _i206 = 0; _i206 < _list205.size; ++_i206) { - String _elem189; // required - _elem189 = iprot.readString(); - this.part_vals.add(_elem189); + String _elem207; // required + _elem207 = iprot.readString(); + this.part_vals.add(_elem207); } iprot.readListEnd(); } @@ -36538,13 +39464,13 @@ case 6: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list190 = iprot.readListBegin(); - this.group_names = new ArrayList(_list190.size); - for (int _i191 = 0; _i191 < 
_list190.size; ++_i191) + org.apache.thrift.protocol.TList _list208 = iprot.readListBegin(); + this.group_names = new ArrayList(_list208.size); + for (int _i209 = 0; _i209 < _list208.size; ++_i209) { - String _elem192; // required - _elem192 = iprot.readString(); - this.group_names.add(_elem192); + String _elem210; // required + _elem210 = iprot.readString(); + this.group_names.add(_elem210); } iprot.readListEnd(); } @@ -36579,9 +39505,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter193 : this.part_vals) + for (String _iter211 : this.part_vals) { - oprot.writeString(_iter193); + oprot.writeString(_iter211); } oprot.writeListEnd(); } @@ -36599,9 +39525,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter194 : this.group_names) + for (String _iter212 : this.group_names) { - oprot.writeString(_iter194); + oprot.writeString(_iter212); } oprot.writeListEnd(); } @@ -37073,14 +39999,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list195 = iprot.readListBegin(); - this.success = new ArrayList(_list195.size); - for (int _i196 = 0; _i196 < _list195.size; ++_i196) + org.apache.thrift.protocol.TList _list213 = iprot.readListBegin(); + this.success = new ArrayList(_list213.size); + for (int _i214 = 0; _i214 < _list213.size; ++_i214) { - Partition _elem197; // required - _elem197 = new Partition(); - _elem197.read(iprot); - this.success.add(_elem197); + Partition _elem215; // required + _elem215 = new Partition(); + _elem215.read(iprot); + this.success.add(_elem215); } iprot.readListEnd(); } @@ -37120,9 +40046,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter198 : this.success) + for (Partition _iter216 : this.success) { - _iter198.write(oprot); + _iter216.write(oprot); } oprot.writeListEnd(); } @@ -37667,13 +40593,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list199 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list199.size); - for (int _i200 = 0; _i200 < _list199.size; ++_i200) + org.apache.thrift.protocol.TList _list217 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list217.size); + for (int _i218 = 0; _i218 < _list217.size; ++_i218) { - String _elem201; // required - _elem201 = iprot.readString(); - this.part_vals.add(_elem201); + String _elem219; // required + _elem219 = iprot.readString(); + this.part_vals.add(_elem219); } iprot.readListEnd(); } @@ -37716,9 +40642,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter202 : this.part_vals) + for (String _iter220 : this.part_vals) { - oprot.writeString(_iter202); + oprot.writeString(_iter220); } oprot.writeListEnd(); } @@ -38177,13 +41103,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list203 = iprot.readListBegin(); - this.success = new ArrayList(_list203.size); - for (int _i204 = 0; _i204 < _list203.size; ++_i204) + org.apache.thrift.protocol.TList _list221 = iprot.readListBegin(); + this.success = new ArrayList(_list221.size); + for (int _i222 = 0; _i222 < _list221.size; ++_i222) { - String _elem205; // required - _elem205 = iprot.readString(); - this.success.add(_elem205); + String _elem223; // required + _elem223 = iprot.readString(); + this.success.add(_elem223); } iprot.readListEnd(); } @@ -38223,9 +41149,9 @@ 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter206 : this.success) + for (String _iter224 : this.success) { - oprot.writeString(_iter206); + oprot.writeString(_iter224); } oprot.writeListEnd(); } @@ -39243,14 +42169,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list207 = iprot.readListBegin(); - this.success = new ArrayList(_list207.size); - for (int _i208 = 0; _i208 < _list207.size; ++_i208) + org.apache.thrift.protocol.TList _list225 = iprot.readListBegin(); + this.success = new ArrayList(_list225.size); + for (int _i226 = 0; _i226 < _list225.size; ++_i226) { - Partition _elem209; // required - _elem209 = new Partition(); - _elem209.read(iprot); - this.success.add(_elem209); + Partition _elem227; // required + _elem227 = new Partition(); + _elem227.read(iprot); + this.success.add(_elem227); } iprot.readListEnd(); } @@ -39290,9 +42216,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter210 : this.success) + for (Partition _iter228 : this.success) { - _iter210.write(oprot); + _iter228.write(oprot); } oprot.writeListEnd(); } @@ -39764,13 +42690,13 @@ case 3: // NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list211 = iprot.readListBegin(); - this.names = new ArrayList(_list211.size); - for (int _i212 = 0; _i212 < _list211.size; ++_i212) + org.apache.thrift.protocol.TList _list229 = iprot.readListBegin(); + this.names = new ArrayList(_list229.size); + for (int _i230 = 0; _i230 < _list229.size; ++_i230) { - String _elem213; // required - _elem213 = iprot.readString(); - this.names.add(_elem213); + String _elem231; // required + _elem231 = iprot.readString(); + 
this.names.add(_elem231); } iprot.readListEnd(); } @@ -39805,9 +42731,9 @@ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.names.size())); - for (String _iter214 : this.names) + for (String _iter232 : this.names) { - oprot.writeString(_iter214); + oprot.writeString(_iter232); } oprot.writeListEnd(); } @@ -40257,14 +43183,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list215 = iprot.readListBegin(); - this.success = new ArrayList(_list215.size); - for (int _i216 = 0; _i216 < _list215.size; ++_i216) + org.apache.thrift.protocol.TList _list233 = iprot.readListBegin(); + this.success = new ArrayList(_list233.size); + for (int _i234 = 0; _i234 < _list233.size; ++_i234) { - Partition _elem217; // required - _elem217 = new Partition(); - _elem217.read(iprot); - this.success.add(_elem217); + Partition _elem235; // required + _elem235 = new Partition(); + _elem235.read(iprot); + this.success.add(_elem235); } iprot.readListEnd(); } @@ -40304,9 +43230,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Partition _iter218 : this.success) + for (Partition _iter236 : this.success) { - _iter218.write(oprot); + _iter236.write(oprot); } oprot.writeListEnd(); } @@ -41699,13 +44625,13 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list219 = iprot.readListBegin(); - this.part_vals = new ArrayList(_list219.size); - for (int _i220 = 0; _i220 < _list219.size; ++_i220) + org.apache.thrift.protocol.TList _list237 = iprot.readListBegin(); + this.part_vals = new ArrayList(_list237.size); + for (int _i238 = 0; _i238 < _list237.size; ++_i238) { - String _elem221; // required - _elem221 = iprot.readString(); - 
this.part_vals.add(_elem221); + String _elem239; // required + _elem239 = iprot.readString(); + this.part_vals.add(_elem239); } iprot.readListEnd(); } @@ -41748,9 +44674,9 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (String _iter222 : this.part_vals) + for (String _iter240 : this.part_vals) { - oprot.writeString(_iter222); + oprot.writeString(_iter240); } oprot.writeListEnd(); } @@ -43584,13 +46510,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list223 = iprot.readListBegin(); - this.success = new ArrayList(_list223.size); - for (int _i224 = 0; _i224 < _list223.size; ++_i224) + org.apache.thrift.protocol.TList _list241 = iprot.readListBegin(); + this.success = new ArrayList(_list241.size); + for (int _i242 = 0; _i242 < _list241.size; ++_i242) { - String _elem225; // required - _elem225 = iprot.readString(); - this.success.add(_elem225); + String _elem243; // required + _elem243 = iprot.readString(); + this.success.add(_elem243); } iprot.readListEnd(); } @@ -43622,9 +46548,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter226 : this.success) + for (String _iter244 : this.success) { - oprot.writeString(_iter226); + oprot.writeString(_iter244); } oprot.writeListEnd(); } @@ -44301,15 +47227,15 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map227 = iprot.readMapBegin(); - this.success = new HashMap(2*_map227.size); - for (int _i228 = 0; _i228 < _map227.size; ++_i228) + org.apache.thrift.protocol.TMap _map245 = iprot.readMapBegin(); + this.success = new HashMap(2*_map245.size); + for (int _i246 = 0; _i246 < _map245.size; ++_i246) { - String _key229; // 
required - String _val230; // required - _key229 = iprot.readString(); - _val230 = iprot.readString(); - this.success.put(_key229, _val230); + String _key247; // required + String _val248; // required + _key247 = iprot.readString(); + _val248 = iprot.readString(); + this.success.put(_key247, _val248); } iprot.readMapEnd(); } @@ -44341,10 +47267,10 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (Map.Entry _iter231 : this.success.entrySet()) + for (Map.Entry _iter249 : this.success.entrySet()) { - oprot.writeString(_iter231.getKey()); - oprot.writeString(_iter231.getValue()); + oprot.writeString(_iter249.getKey()); + oprot.writeString(_iter249.getValue()); } oprot.writeMapEnd(); } @@ -44889,15 +47815,15 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map232 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map232.size); - for (int _i233 = 0; _i233 < _map232.size; ++_i233) + org.apache.thrift.protocol.TMap _map250 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map250.size); + for (int _i251 = 0; _i251 < _map250.size; ++_i251) { - String _key234; // required - String _val235; // required - _key234 = iprot.readString(); - _val235 = iprot.readString(); - this.part_vals.put(_key234, _val235); + String _key252; // required + String _val253; // required + _key252 = iprot.readString(); + _val253 = iprot.readString(); + this.part_vals.put(_key252, _val253); } iprot.readMapEnd(); } @@ -44939,10 +47865,10 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (Map.Entry _iter236 : this.part_vals.entrySet()) + for (Map.Entry _iter254 : this.part_vals.entrySet()) { 
- oprot.writeString(_iter236.getKey()); - oprot.writeString(_iter236.getValue()); + oprot.writeString(_iter254.getKey()); + oprot.writeString(_iter254.getValue()); } oprot.writeMapEnd(); } @@ -46238,15 +49164,15 @@ case 3: // PART_VALS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map237 = iprot.readMapBegin(); - this.part_vals = new HashMap(2*_map237.size); - for (int _i238 = 0; _i238 < _map237.size; ++_i238) + org.apache.thrift.protocol.TMap _map255 = iprot.readMapBegin(); + this.part_vals = new HashMap(2*_map255.size); + for (int _i256 = 0; _i256 < _map255.size; ++_i256) { - String _key239; // required - String _val240; // required - _key239 = iprot.readString(); - _val240 = iprot.readString(); - this.part_vals.put(_key239, _val240); + String _key257; // required + String _val258; // required + _key257 = iprot.readString(); + _val258 = iprot.readString(); + this.part_vals.put(_key257, _val258); } iprot.readMapEnd(); } @@ -46288,10 +49214,10 @@ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.part_vals.size())); - for (Map.Entry _iter241 : this.part_vals.entrySet()) + for (Map.Entry _iter259 : this.part_vals.entrySet()) { - oprot.writeString(_iter241.getKey()); - oprot.writeString(_iter241.getValue()); + oprot.writeString(_iter259.getKey()); + oprot.writeString(_iter259.getValue()); } oprot.writeMapEnd(); } @@ -51897,14 +54823,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list242 = iprot.readListBegin(); - this.success = new ArrayList(_list242.size); - for (int _i243 = 0; _i243 < _list242.size; ++_i243) + org.apache.thrift.protocol.TList _list260 = iprot.readListBegin(); + this.success = new ArrayList(_list260.size); + for (int _i261 = 0; _i261 < _list260.size; ++_i261) { - Index _elem244; // required 
- _elem244 = new Index(); - _elem244.read(iprot); - this.success.add(_elem244); + Index _elem262; // required + _elem262 = new Index(); + _elem262.read(iprot); + this.success.add(_elem262); } iprot.readListEnd(); } @@ -51944,9 +54870,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Index _iter245 : this.success) + for (Index _iter263 : this.success) { - _iter245.write(oprot); + _iter263.write(oprot); } oprot.writeListEnd(); } @@ -52808,13 +55734,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list246 = iprot.readListBegin(); - this.success = new ArrayList(_list246.size); - for (int _i247 = 0; _i247 < _list246.size; ++_i247) + org.apache.thrift.protocol.TList _list264 = iprot.readListBegin(); + this.success = new ArrayList(_list264.size); + for (int _i265 = 0; _i265 < _list264.size; ++_i265) { - String _elem248; // required - _elem248 = iprot.readString(); - this.success.add(_elem248); + String _elem266; // required + _elem266 = iprot.readString(); + this.success.add(_elem266); } iprot.readListEnd(); } @@ -52846,9 +55772,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter249 : this.success) + for (String _iter267 : this.success) { - oprot.writeString(_iter249); + oprot.writeString(_iter267); } oprot.writeListEnd(); } @@ -52908,6 +55834,3859 @@ } + public static class create_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private Cluster cluster; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER((short)1, "cluster"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER + return CLUSTER; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER, new org.apache.thrift.meta_data.FieldMetaData("cluster", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Cluster.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_cluster_args.class, metaDataMap); + } + + public create_cluster_args() { + } + + public create_cluster_args( + Cluster cluster) + { + this(); + this.cluster = cluster; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_cluster_args(create_cluster_args other) { + if (other.isSetCluster()) { + this.cluster = new Cluster(other.cluster); + } + } + + public create_cluster_args deepCopy() { + return new create_cluster_args(this); + } + + @Override + public void clear() { + this.cluster = null; + } + + public Cluster getCluster() { + return this.cluster; + } + + public void setCluster(Cluster cluster) { + this.cluster = cluster; + } + + public void unsetCluster() { + this.cluster = null; + } + + /** Returns true if field cluster is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster() { + return this.cluster != null; + } + + public void setClusterIsSet(boolean value) { + if (!value) { + this.cluster = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER: + if (value == null) { + unsetCluster(); + } else { + setCluster((Cluster)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER: + return getCluster(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER: + return isSetCluster(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_cluster_args) + return this.equals((create_cluster_args)that); + return false; + } + + public boolean equals(create_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster = true && this.isSetCluster(); + boolean that_present_cluster = true && that.isSetCluster(); + if (this_present_cluster || that_present_cluster) { + if (!(this_present_cluster && that_present_cluster)) + return false; + if 
(!this.cluster.equals(that.cluster)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(create_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + create_cluster_args typedOther = (create_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster()).compareTo(typedOther.isSetCluster()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster, typedOther.cluster); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.cluster = new Cluster(); + this.cluster.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster != null) { + oprot.writeFieldBegin(CLUSTER_FIELD_DESC); + this.cluster.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("create_cluster_args("); + boolean first = true; + + sb.append("cluster:"); + if (this.cluster == null) { + sb.append("null"); + } else { + sb.append(this.cluster); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class create_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_cluster_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private AlreadyExistsException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_cluster_result.class, metaDataMap); + } + + public create_cluster_result() { + } + + public create_cluster_result( + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_cluster_result(create_cluster_result other) { + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public create_cluster_result deepCopy() { + return new create_cluster_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + 
setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_cluster_result) + return this.equals((create_cluster_result)that); + return false; + } + + public boolean equals(create_cluster_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(create_cluster_result other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + create_cluster_result typedOther = (create_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new AlreadyExistsException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o2 = new InvalidObjectException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
field.type); + } + break; + case 3: // O3 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_cluster_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void 
readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class drop_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster_name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private String cluster_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER_NAME((short)1, "cluster_name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER_NAME + return CLUSTER_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER_NAME, new org.apache.thrift.meta_data.FieldMetaData("cluster_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_cluster_args.class, metaDataMap); + } + + public drop_cluster_args() { + } + + public drop_cluster_args( + String cluster_name) + { + this(); + this.cluster_name = cluster_name; + } + + /** + * Performs a deep copy on other. 
+ */ + public drop_cluster_args(drop_cluster_args other) { + if (other.isSetCluster_name()) { + this.cluster_name = other.cluster_name; + } + } + + public drop_cluster_args deepCopy() { + return new drop_cluster_args(this); + } + + @Override + public void clear() { + this.cluster_name = null; + } + + public String getCluster_name() { + return this.cluster_name; + } + + public void setCluster_name(String cluster_name) { + this.cluster_name = cluster_name; + } + + public void unsetCluster_name() { + this.cluster_name = null; + } + + /** Returns true if field cluster_name is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster_name() { + return this.cluster_name != null; + } + + public void setCluster_nameIsSet(boolean value) { + if (!value) { + this.cluster_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER_NAME: + if (value == null) { + unsetCluster_name(); + } else { + setCluster_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER_NAME: + return getCluster_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER_NAME: + return isSetCluster_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_cluster_args) + return this.equals((drop_cluster_args)that); + return false; + } + + public boolean equals(drop_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster_name = true && this.isSetCluster_name(); + boolean that_present_cluster_name = true && that.isSetCluster_name(); + if (this_present_cluster_name 
|| that_present_cluster_name) { + if (!(this_present_cluster_name && that_present_cluster_name)) + return false; + if (!this.cluster_name.equals(that.cluster_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + drop_cluster_args typedOther = (drop_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster_name()).compareTo(typedOther.isSetCluster_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster_name, typedOther.cluster_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.cluster_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster_name != null) { + oprot.writeFieldBegin(CLUSTER_NAME_FIELD_DESC); + oprot.writeString(this.cluster_name); + oprot.writeFieldEnd(); + } + 
oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_cluster_args("); + boolean first = true; + + sb.append("cluster_name:"); + if (this.cluster_name == null) { + sb.append("null"); + } else { + sb.append(this.cluster_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class drop_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_cluster_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private NoSuchObjectException o1; // required + private InvalidOperationException o2; // required + private MetaException 
o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_cluster_result.class, metaDataMap); + } + + public drop_cluster_result() { + } + + public drop_cluster_result( + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public drop_cluster_result(drop_cluster_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public drop_cluster_result deepCopy() { + return new drop_cluster_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + 
setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_cluster_result) + return this.equals((drop_cluster_result)that); + return false; + } + + public boolean equals(drop_cluster_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(drop_cluster_result other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + drop_cluster_result typedOther = (drop_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new NoSuchObjectException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o2 = new InvalidOperationException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
field.type); + } + break; + case 3: // O3 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o3 = new MetaException(); + this.o3.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_cluster_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void 
readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class get_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster_name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private String cluster_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER_NAME((short)1, "cluster_name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER_NAME + return CLUSTER_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER_NAME, new org.apache.thrift.meta_data.FieldMetaData("cluster_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_cluster_args.class, metaDataMap); + } + + public get_cluster_args() { + } + + public get_cluster_args( + String cluster_name) + { + this(); + this.cluster_name = cluster_name; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_cluster_args(get_cluster_args other) { + if (other.isSetCluster_name()) { + this.cluster_name = other.cluster_name; + } + } + + public get_cluster_args deepCopy() { + return new get_cluster_args(this); + } + + @Override + public void clear() { + this.cluster_name = null; + } + + public String getCluster_name() { + return this.cluster_name; + } + + public void setCluster_name(String cluster_name) { + this.cluster_name = cluster_name; + } + + public void unsetCluster_name() { + this.cluster_name = null; + } + + /** Returns true if field cluster_name is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster_name() { + return this.cluster_name != null; + } + + public void setCluster_nameIsSet(boolean value) { + if (!value) { + this.cluster_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER_NAME: + if (value == null) { + unsetCluster_name(); + } else { + setCluster_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER_NAME: + return getCluster_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER_NAME: + return isSetCluster_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_cluster_args) + return this.equals((get_cluster_args)that); + return false; + } + + public boolean equals(get_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster_name = true && this.isSetCluster_name(); + boolean that_present_cluster_name = true && that.isSetCluster_name(); + if (this_present_cluster_name || 
that_present_cluster_name) { + if (!(this_present_cluster_name && that_present_cluster_name)) + return false; + if (!this.cluster_name.equals(that.cluster_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(get_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + get_cluster_args typedOther = (get_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster_name()).compareTo(typedOther.isSetCluster_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster_name, typedOther.cluster_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.cluster_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster_name != null) { + oprot.writeFieldBegin(CLUSTER_NAME_FIELD_DESC); + oprot.writeString(this.cluster_name); + oprot.writeFieldEnd(); + } + 
oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_cluster_args("); + boolean first = true; + + sb.append("cluster_name:"); + if (this.cluster_name == null) { + sb.append("null"); + } else { + sb.append(this.cluster_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class get_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_cluster_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private Cluster success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // 
required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Cluster.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_cluster_result.class, metaDataMap); + } + + public get_cluster_result() { + } + + public get_cluster_result( + Cluster success, + MetaException o1, + NoSuchObjectException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_cluster_result(get_cluster_result other) { + if (other.isSetSuccess()) { + this.success = new Cluster(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchObjectException(other.o2); + } + } + + public get_cluster_result deepCopy() { + return new get_cluster_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public Cluster getSuccess() { + return this.success; + } + + public void setSuccess(Cluster success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { + return this.o2; + } + + public void setO2(NoSuchObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Cluster)value); + } + break; + + case O1: + if (value == null) { + 
unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchObjectException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_cluster_result) + return this.equals((get_cluster_result)that); + return false; + } + + public boolean equals(get_cluster_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + 
+ public int compareTo(get_cluster_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + get_cluster_result typedOther = (get_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 0: // SUCCESS + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.success = new Cluster(); + this.success.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } 
else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o2 = new NoSuchObjectException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + this.success.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_cluster_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class list_clusters_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("list_clusters_args"); + + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(list_clusters_args.class, metaDataMap); + } + + public list_clusters_args() { + } + + /** + * Performs a deep copy on other. + */ + public list_clusters_args(list_clusters_args other) { + } + + public list_clusters_args deepCopy() { + return new list_clusters_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof list_clusters_args) + return this.equals((list_clusters_args)that); + return false; + } + + public boolean equals(list_clusters_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(list_clusters_args other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + list_clusters_args typedOther = (list_clusters_args)other; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("list_clusters_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class list_clusters_result implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("list_clusters_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private List success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Cluster.class)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(list_clusters_result.class, metaDataMap); + } + + public list_clusters_result() { + } + + public list_clusters_result( + List success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. 
+ */ + public list_clusters_result(list_clusters_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(); + for (Cluster other_element : other.success) { + __this__success.add(new Cluster(other_element)); + } + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public list_clusters_result deepCopy() { + return new list_clusters_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? null : this.success.iterator(); + } + + public void addToSuccess(Cluster elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + 
setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof list_clusters_result) + return this.equals((list_clusters_result)that); + return false; + } + + public boolean equals(list_clusters_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(list_clusters_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + list_clusters_result typedOther = (list_clusters_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison 
= org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 0: // SUCCESS + if (field.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list268 = iprot.readListBegin(); + this.success = new ArrayList(_list268.size); + for (int _i269 = 0; _i269 < _list268.size; ++_i269) + { + Cluster _elem270; // required + _elem270 = new Cluster(); + _elem270.read(iprot); + this.success.add(_elem270); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); + for (Cluster _iter271 : this.success) + { + _iter271.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } else if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("list_clusters_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class alter_cluster_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_cluster_args"); + + private static final org.apache.thrift.protocol.TField CLUSTER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster_name", 
org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField CLUSTER_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private String cluster_name; // required + private Cluster cluster; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER_NAME((short)1, "cluster_name"), + CLUSTER((short)2, "cluster"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER_NAME + return CLUSTER_NAME; + case 2: // CLUSTER + return CLUSTER; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER_NAME, new org.apache.thrift.meta_data.FieldMetaData("cluster_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CLUSTER, new org.apache.thrift.meta_data.FieldMetaData("cluster", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Cluster.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_cluster_args.class, metaDataMap); + } + + public alter_cluster_args() { + } + + public alter_cluster_args( + String cluster_name, + Cluster cluster) + { + this(); + this.cluster_name = cluster_name; + this.cluster = cluster; + } + + /** + * Performs a deep copy on other. 
+ */ + public alter_cluster_args(alter_cluster_args other) { + if (other.isSetCluster_name()) { + this.cluster_name = other.cluster_name; + } + if (other.isSetCluster()) { + this.cluster = new Cluster(other.cluster); + } + } + + public alter_cluster_args deepCopy() { + return new alter_cluster_args(this); + } + + @Override + public void clear() { + this.cluster_name = null; + this.cluster = null; + } + + public String getCluster_name() { + return this.cluster_name; + } + + public void setCluster_name(String cluster_name) { + this.cluster_name = cluster_name; + } + + public void unsetCluster_name() { + this.cluster_name = null; + } + + /** Returns true if field cluster_name is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster_name() { + return this.cluster_name != null; + } + + public void setCluster_nameIsSet(boolean value) { + if (!value) { + this.cluster_name = null; + } + } + + public Cluster getCluster() { + return this.cluster; + } + + public void setCluster(Cluster cluster) { + this.cluster = cluster; + } + + public void unsetCluster() { + this.cluster = null; + } + + /** Returns true if field cluster is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster() { + return this.cluster != null; + } + + public void setClusterIsSet(boolean value) { + if (!value) { + this.cluster = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER_NAME: + if (value == null) { + unsetCluster_name(); + } else { + setCluster_name((String)value); + } + break; + + case CLUSTER: + if (value == null) { + unsetCluster(); + } else { + setCluster((Cluster)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER_NAME: + return getCluster_name(); + + case CLUSTER: + return getCluster(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a 
value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER_NAME: + return isSetCluster_name(); + case CLUSTER: + return isSetCluster(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_cluster_args) + return this.equals((alter_cluster_args)that); + return false; + } + + public boolean equals(alter_cluster_args that) { + if (that == null) + return false; + + boolean this_present_cluster_name = true && this.isSetCluster_name(); + boolean that_present_cluster_name = true && that.isSetCluster_name(); + if (this_present_cluster_name || that_present_cluster_name) { + if (!(this_present_cluster_name && that_present_cluster_name)) + return false; + if (!this.cluster_name.equals(that.cluster_name)) + return false; + } + + boolean this_present_cluster = true && this.isSetCluster(); + boolean that_present_cluster = true && that.isSetCluster(); + if (this_present_cluster || that_present_cluster) { + if (!(this_present_cluster && that_present_cluster)) + return false; + if (!this.cluster.equals(that.cluster)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(alter_cluster_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + alter_cluster_args typedOther = (alter_cluster_args)other; + + lastComparison = Boolean.valueOf(isSetCluster_name()).compareTo(typedOther.isSetCluster_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster_name, typedOther.cluster_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetCluster()).compareTo(typedOther.isSetCluster()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster, typedOther.cluster); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER_NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.cluster_name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // CLUSTER + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.cluster = new Cluster(); + this.cluster.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster_name != null) { + oprot.writeFieldBegin(CLUSTER_NAME_FIELD_DESC); + oprot.writeString(this.cluster_name); + oprot.writeFieldEnd(); + } + if (this.cluster != null) { + oprot.writeFieldBegin(CLUSTER_FIELD_DESC); + this.cluster.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_cluster_args("); + boolean first = true; + + 
sb.append("cluster_name:"); + if (this.cluster_name == null) { + sb.append("null"); + } else { + sb.append(this.cluster_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("cluster:"); + if (this.cluster == null) { + sb.append("null"); + } else { + sb.append(this.cluster); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + + public static class alter_cluster_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_cluster_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", 
org.apache.thrift.protocol.TType.STRUCT, (short)4); + + private MetaException o1; // required + private InvalidObjectException o2; // required + private NoSuchObjectException o3; // required + private AlreadyExistsException o4; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_cluster_result.class, metaDataMap); + } + + public alter_cluster_result() { + } + + public alter_cluster_result( + MetaException o1, + InvalidObjectException o2, + NoSuchObjectException o3, + AlreadyExistsException o4) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + this.o4 = o4; + } + + /** + * 
Performs a deep copy on other. + */ + public alter_cluster_result(alter_cluster_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new NoSuchObjectException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new AlreadyExistsException(other.o4); + } + } + + public alter_cluster_result deepCopy() { + return new alter_cluster_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + this.o4 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public NoSuchObjectException getO3() { + return this.o3; + } + + public void setO3(NoSuchObjectException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public AlreadyExistsException getO4() { + return this.o4; + } + + public void setO4(AlreadyExistsException o4) { + this.o4 = o4; + } + + 
public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((NoSuchObjectException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { + setO4((AlreadyExistsException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + case O4: + return getO4(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_cluster_result) + return this.equals((alter_cluster_result)that); + return false; + } + + public boolean equals(alter_cluster_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + 
return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(alter_cluster_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + alter_cluster_result typedOther = (alter_cluster_result)other; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + 
if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(typedOther.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, typedOther.o4); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // O1 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o1 = new MetaException(); + this.o1.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // O2 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o2 = new InvalidObjectException(); + this.o2.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // O3 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o3 = new NoSuchObjectException(); + this.o3.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // O4 + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.o4 = new AlreadyExistsException(); + this.o4.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + 
oprot.writeStructBegin(STRUCT_DESC); + + if (this.isSetO1()) { + oprot.writeFieldBegin(O1_FIELD_DESC); + this.o1.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO2()) { + oprot.writeFieldBegin(O2_FIELD_DESC); + this.o2.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO3()) { + oprot.writeFieldBegin(O3_FIELD_DESC); + this.o3.write(oprot); + oprot.writeFieldEnd(); + } else if (this.isSetO4()) { + oprot.writeFieldBegin(O4_FIELD_DESC); + this.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_cluster_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + } + public static class create_role_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_role_args"); @@ -54777,13 +61556,13 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list250 = iprot.readListBegin(); - this.success = new ArrayList(_list250.size); - for (int _i251 = 0; _i251 < _list250.size; ++_i251) + org.apache.thrift.protocol.TList _list272 = iprot.readListBegin(); + this.success = new ArrayList(_list272.size); + for (int _i273 = 0; _i273 < _list272.size; ++_i273) { - String _elem252; // required - _elem252 = iprot.readString(); - this.success.add(_elem252); + String _elem274; // required + _elem274 = iprot.readString(); + this.success.add(_elem274); } iprot.readListEnd(); } @@ -54815,9 +61594,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter253 : this.success) + for (String _iter275 : this.success) { - oprot.writeString(_iter253); + oprot.writeString(_iter275); } oprot.writeListEnd(); } @@ -57591,14 +64370,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list254 = iprot.readListBegin(); - this.success = new ArrayList(_list254.size); - for (int _i255 = 0; _i255 < _list254.size; ++_i255) + org.apache.thrift.protocol.TList _list276 = iprot.readListBegin(); + this.success = new ArrayList(_list276.size); + for (int _i277 = 0; _i277 < _list276.size; ++_i277) { - Role _elem256; // required - _elem256 = new Role(); - _elem256.read(iprot); - this.success.add(_elem256); + Role _elem278; // required + _elem278 = new Role(); + _elem278.read(iprot); + 
this.success.add(_elem278); } iprot.readListEnd(); } @@ -57630,9 +64409,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (Role _iter257 : this.success) + for (Role _iter279 : this.success) { - _iter257.write(oprot); + _iter279.write(oprot); } oprot.writeListEnd(); } @@ -58093,13 +64872,13 @@ case 3: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list258 = iprot.readListBegin(); - this.group_names = new ArrayList(_list258.size); - for (int _i259 = 0; _i259 < _list258.size; ++_i259) + org.apache.thrift.protocol.TList _list280 = iprot.readListBegin(); + this.group_names = new ArrayList(_list280.size); + for (int _i281 = 0; _i281 < _list280.size; ++_i281) { - String _elem260; // required - _elem260 = iprot.readString(); - this.group_names.add(_elem260); + String _elem282; // required + _elem282 = iprot.readString(); + this.group_names.add(_elem282); } iprot.readListEnd(); } @@ -58134,9 +64913,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter261 : this.group_names) + for (String _iter283 : this.group_names) { - oprot.writeString(_iter261); + oprot.writeString(_iter283); } oprot.writeListEnd(); } @@ -59383,14 +66162,14 @@ case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list262 = iprot.readListBegin(); - this.success = new ArrayList(_list262.size); - for (int _i263 = 0; _i263 < _list262.size; ++_i263) + org.apache.thrift.protocol.TList _list284 = iprot.readListBegin(); + this.success = new ArrayList(_list284.size); + for (int _i285 = 0; _i285 < _list284.size; ++_i285) { - HiveObjectPrivilege _elem264; // required - _elem264 = new HiveObjectPrivilege(); - 
_elem264.read(iprot); - this.success.add(_elem264); + HiveObjectPrivilege _elem286; // required + _elem286 = new HiveObjectPrivilege(); + _elem286.read(iprot); + this.success.add(_elem286); } iprot.readListEnd(); } @@ -59422,9 +66201,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.success.size())); - for (HiveObjectPrivilege _iter265 : this.success) + for (HiveObjectPrivilege _iter287 : this.success) { - _iter265.write(oprot); + _iter287.write(oprot); } oprot.writeListEnd(); } @@ -60911,10 +67690,10 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("user_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.USER_NAME, new org.apache.thrift.meta_data.FieldMetaData("user_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.GROUP_NAMES, new org.apache.thrift.meta_data.FieldMetaData("group_names", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_ugi_args.class, metaDataMap); @@ -61147,31 +67926,31 @@ while (true) { field = 
iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // USER_NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.user_name = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // GROUP_NAMES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list266 = iprot.readListBegin(); - this.group_names = new ArrayList(_list266.size); - for (int _i267 = 0; _i267 < _list266.size; ++_i267) + org.apache.thrift.protocol.TList _list288 = iprot.readListBegin(); + this.group_names = new ArrayList(_list288.size); + for (int _i289 = 0; _i289 < _list288.size; ++_i289) { - String _elem268; // required - _elem268 = iprot.readString(); - this.group_names.add(_elem268); + String _elem290; // required + _elem290 = iprot.readString(); + this.group_names.add(_elem290); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -61197,9 +67976,9 @@ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.group_names.size())); - for (String _iter269 : this.group_names) + for (String _iter291 : this.group_names) { - oprot.writeString(_iter269); + oprot.writeString(_iter291); } oprot.writeListEnd(); } @@ -61330,10 +68109,10 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new 
org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_ugi_result.class, metaDataMap); @@ -61566,24 +68345,24 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 0: // SUCCESS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list270 = iprot.readListBegin(); - this.success = new ArrayList(_list270.size); - for (int _i271 = 0; _i271 < _list270.size; ++_i271) + org.apache.thrift.protocol.TList _list292 = iprot.readListBegin(); + this.success = new ArrayList(_list292.size); + for (int _i293 = 0; _i293 < _list292.size; ++_i293) { - String _elem272; // required - _elem272 = iprot.readString(); - this.success.add(_elem272); + String _elem294; // required + _elem294 = iprot.readString(); + this.success.add(_elem294); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -61591,7 +68370,7 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.o1 = new MetaException(); this.o1.read(iprot); - } else { 
+ } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -61611,9 +68390,9 @@ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.success.size())); - for (String _iter273 : this.success) + for (String _iter295 : this.success) { - oprot.writeString(_iter273); + oprot.writeString(_iter295); } oprot.writeListEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java (working copy) @@ -95,12 +95,12 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.FIELD_SCHEMAS, new org.apache.thrift.meta_data.FieldMetaData("fieldSchemas", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.FIELD_SCHEMAS, new org.apache.thrift.meta_data.FieldMetaData("fieldSchemas", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); - tmpMap.put(_Fields.PROPERTIES, new org.apache.thrift.meta_data.FieldMetaData("properties", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + 
tmpMap.put(_Fields.PROPERTIES, new org.apache.thrift.meta_data.FieldMetaData("properties", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Schema.class, metaDataMap); @@ -356,44 +356,44 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // FIELD_SCHEMAS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list89 = iprot.readListBegin(); - this.fieldSchemas = new ArrayList(_list89.size); - for (int _i90 = 0; _i90 < _list89.size; ++_i90) + org.apache.thrift.protocol.TList _list103 = iprot.readListBegin(); + this.fieldSchemas = new ArrayList(_list103.size); + for (int _i104 = 0; _i104 < _list103.size; ++_i104) { - FieldSchema _elem91; // required - _elem91 = new FieldSchema(); - _elem91.read(iprot); - this.fieldSchemas.add(_elem91); + FieldSchema _elem105; // required + _elem105 = new FieldSchema(); + _elem105.read(iprot); + this.fieldSchemas.add(_elem105); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // PROPERTIES if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map92 = iprot.readMapBegin(); - this.properties = new HashMap(2*_map92.size); - for (int _i93 = 0; _i93 < _map92.size; ++_i93) + org.apache.thrift.protocol.TMap _map106 = iprot.readMapBegin(); + this.properties = new HashMap(2*_map106.size); + for (int _i107 = 0; _i107 < _map106.size; ++_i107) { - 
String _key94; // required - String _val95; // required - _key94 = iprot.readString(); - _val95 = iprot.readString(); - this.properties.put(_key94, _val95); + String _key108; // required + String _val109; // required + _key108 = iprot.readString(); + _val109 = iprot.readString(); + this.properties.put(_key108, _val109); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -414,9 +414,9 @@ oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.fieldSchemas.size())); - for (FieldSchema _iter96 : this.fieldSchemas) + for (FieldSchema _iter110 : this.fieldSchemas) { - _iter96.write(oprot); + _iter110.write(oprot); } oprot.writeListEnd(); } @@ -426,10 +426,10 @@ oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.properties.size())); - for (Map.Entry _iter97 : this.properties.entrySet()) + for (Map.Entry _iter111 : this.properties.entrySet()) { - oprot.writeString(_iter97.getKey()); - oprot.writeString(_iter97.getValue()); + oprot.writeString(_iter111.getKey()); + oprot.writeString(_iter111.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClusterStorageDescriptor.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClusterStorageDescriptor.java (revision 0) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClusterStorageDescriptor.java (revision 0) @@ -0,0 +1,709 @@ +/** + * Autogenerated by Thrift Compiler (0.7.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + */ +package org.apache.hadoop.hive.metastore.api; + +import java.util.List; +import 
java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ClusterStorageDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterStorageDescriptor"); + + private static final org.apache.thrift.protocol.TField CLUSTER_FIELD_DESC = new org.apache.thrift.protocol.TField("cluster", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField LOCATION_FIELD_DESC = new org.apache.thrift.protocol.TField("location", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PRIMARY_FIELD_DESC = new org.apache.thrift.protocol.TField("primary", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField DATA_SYNCED_FIELD_DESC = new org.apache.thrift.protocol.TField("dataSynced", org.apache.thrift.protocol.TType.BOOL, (short)4); + private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)5); + + private Cluster cluster; // required + private String location; // required + private boolean primary; // required + private boolean dataSynced; // required + private Map parameters; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CLUSTER((short)1, "cluster"), + LOCATION((short)2, "location"), + PRIMARY((short)3, "primary"), + DATA_SYNCED((short)4, "dataSynced"), + PARAMETERS((short)5, "parameters"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CLUSTER + return CLUSTER; + case 2: // LOCATION + return LOCATION; + case 3: // PRIMARY + return PRIMARY; + case 4: // DATA_SYNCED + return DATA_SYNCED; + case 5: // PARAMETERS + return PARAMETERS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __PRIMARY_ISSET_ID = 0; + private static final int __DATASYNCED_ISSET_ID = 1; + private BitSet __isset_bit_vector = new BitSet(2); + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CLUSTER, new org.apache.thrift.meta_data.FieldMetaData("cluster", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Cluster.class))); + tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PRIMARY, new org.apache.thrift.meta_data.FieldMetaData("primary", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.DATA_SYNCED, new org.apache.thrift.meta_data.FieldMetaData("dataSynced", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + 
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClusterStorageDescriptor.class, metaDataMap); + } + + public ClusterStorageDescriptor() { + } + + public ClusterStorageDescriptor( + Cluster cluster, + String location, + boolean primary, + boolean dataSynced, + Map parameters) + { + this(); + this.cluster = cluster; + this.location = location; + this.primary = primary; + setPrimaryIsSet(true); + this.dataSynced = dataSynced; + setDataSyncedIsSet(true); + this.parameters = parameters; + } + + /** + * Performs a deep copy on other. + */ + public ClusterStorageDescriptor(ClusterStorageDescriptor other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + if (other.isSetCluster()) { + this.cluster = new Cluster(other.cluster); + } + if (other.isSetLocation()) { + this.location = other.location; + } + this.primary = other.primary; + this.dataSynced = other.dataSynced; + if (other.isSetParameters()) { + Map __this__parameters = new HashMap(); + for (Map.Entry other_element : other.parameters.entrySet()) { + + String other_element_key = other_element.getKey(); + String other_element_value = other_element.getValue(); + + String __this__parameters_copy_key = other_element_key; + + String __this__parameters_copy_value = other_element_value; + + __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value); + } + this.parameters = __this__parameters; + } + } + + public ClusterStorageDescriptor deepCopy() { + return new ClusterStorageDescriptor(this); + } + + @Override + public void clear() { + this.cluster = null; + this.location = null; + setPrimaryIsSet(false); + this.primary = false; + setDataSyncedIsSet(false); + this.dataSynced = false; + this.parameters = null; + 
} + + public Cluster getCluster() { + return this.cluster; + } + + public void setCluster(Cluster cluster) { + this.cluster = cluster; + } + + public void unsetCluster() { + this.cluster = null; + } + + /** Returns true if field cluster is set (has been assigned a value) and false otherwise */ + public boolean isSetCluster() { + return this.cluster != null; + } + + public void setClusterIsSet(boolean value) { + if (!value) { + this.cluster = null; + } + } + + public String getLocation() { + return this.location; + } + + public void setLocation(String location) { + this.location = location; + } + + public void unsetLocation() { + this.location = null; + } + + /** Returns true if field location is set (has been assigned a value) and false otherwise */ + public boolean isSetLocation() { + return this.location != null; + } + + public void setLocationIsSet(boolean value) { + if (!value) { + this.location = null; + } + } + + public boolean isPrimary() { + return this.primary; + } + + public void setPrimary(boolean primary) { + this.primary = primary; + setPrimaryIsSet(true); + } + + public void unsetPrimary() { + __isset_bit_vector.clear(__PRIMARY_ISSET_ID); + } + + /** Returns true if field primary is set (has been assigned a value) and false otherwise */ + public boolean isSetPrimary() { + return __isset_bit_vector.get(__PRIMARY_ISSET_ID); + } + + public void setPrimaryIsSet(boolean value) { + __isset_bit_vector.set(__PRIMARY_ISSET_ID, value); + } + + public boolean isDataSynced() { + return this.dataSynced; + } + + public void setDataSynced(boolean dataSynced) { + this.dataSynced = dataSynced; + setDataSyncedIsSet(true); + } + + public void unsetDataSynced() { + __isset_bit_vector.clear(__DATASYNCED_ISSET_ID); + } + + /** Returns true if field dataSynced is set (has been assigned a value) and false otherwise */ + public boolean isSetDataSynced() { + return __isset_bit_vector.get(__DATASYNCED_ISSET_ID); + } + + public void setDataSyncedIsSet(boolean value) { + 
__isset_bit_vector.set(__DATASYNCED_ISSET_ID, value); + } + + public int getParametersSize() { + return (this.parameters == null) ? 0 : this.parameters.size(); + } + + public void putToParameters(String key, String val) { + if (this.parameters == null) { + this.parameters = new HashMap(); + } + this.parameters.put(key, val); + } + + public Map getParameters() { + return this.parameters; + } + + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + public void unsetParameters() { + this.parameters = null; + } + + /** Returns true if field parameters is set (has been assigned a value) and false otherwise */ + public boolean isSetParameters() { + return this.parameters != null; + } + + public void setParametersIsSet(boolean value) { + if (!value) { + this.parameters = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CLUSTER: + if (value == null) { + unsetCluster(); + } else { + setCluster((Cluster)value); + } + break; + + case LOCATION: + if (value == null) { + unsetLocation(); + } else { + setLocation((String)value); + } + break; + + case PRIMARY: + if (value == null) { + unsetPrimary(); + } else { + setPrimary((Boolean)value); + } + break; + + case DATA_SYNCED: + if (value == null) { + unsetDataSynced(); + } else { + setDataSynced((Boolean)value); + } + break; + + case PARAMETERS: + if (value == null) { + unsetParameters(); + } else { + setParameters((Map)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CLUSTER: + return getCluster(); + + case LOCATION: + return getLocation(); + + case PRIMARY: + return Boolean.valueOf(isPrimary()); + + case DATA_SYNCED: + return Boolean.valueOf(isDataSynced()); + + case PARAMETERS: + return getParameters(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields 
field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CLUSTER: + return isSetCluster(); + case LOCATION: + return isSetLocation(); + case PRIMARY: + return isSetPrimary(); + case DATA_SYNCED: + return isSetDataSynced(); + case PARAMETERS: + return isSetParameters(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ClusterStorageDescriptor) + return this.equals((ClusterStorageDescriptor)that); + return false; + } + + public boolean equals(ClusterStorageDescriptor that) { + if (that == null) + return false; + + boolean this_present_cluster = true && this.isSetCluster(); + boolean that_present_cluster = true && that.isSetCluster(); + if (this_present_cluster || that_present_cluster) { + if (!(this_present_cluster && that_present_cluster)) + return false; + if (!this.cluster.equals(that.cluster)) + return false; + } + + boolean this_present_location = true && this.isSetLocation(); + boolean that_present_location = true && that.isSetLocation(); + if (this_present_location || that_present_location) { + if (!(this_present_location && that_present_location)) + return false; + if (!this.location.equals(that.location)) + return false; + } + + boolean this_present_primary = true; + boolean that_present_primary = true; + if (this_present_primary || that_present_primary) { + if (!(this_present_primary && that_present_primary)) + return false; + if (this.primary != that.primary) + return false; + } + + boolean this_present_dataSynced = true; + boolean that_present_dataSynced = true; + if (this_present_dataSynced || that_present_dataSynced) { + if (!(this_present_dataSynced && that_present_dataSynced)) + return false; + if (this.dataSynced != that.dataSynced) + return false; + } + + boolean this_present_parameters = true && this.isSetParameters(); + boolean that_present_parameters = true && that.isSetParameters(); + if 
(this_present_parameters || that_present_parameters) { + if (!(this_present_parameters && that_present_parameters)) + return false; + if (!this.parameters.equals(that.parameters)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(ClusterStorageDescriptor other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + ClusterStorageDescriptor typedOther = (ClusterStorageDescriptor)other; + + lastComparison = Boolean.valueOf(isSetCluster()).compareTo(typedOther.isSetCluster()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCluster()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cluster, typedOther.cluster); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetLocation()).compareTo(typedOther.isSetLocation()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLocation()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.location, typedOther.location); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPrimary()).compareTo(typedOther.isSetPrimary()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPrimary()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.primary, typedOther.primary); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDataSynced()).compareTo(typedOther.isSetDataSynced()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDataSynced()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dataSynced, typedOther.dataSynced); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParameters()).compareTo(typedOther.isSetParameters()); + if 
(lastComparison != 0) { + return lastComparison; + } + if (isSetParameters()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, typedOther.parameters); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // CLUSTER + if (field.type == org.apache.thrift.protocol.TType.STRUCT) { + this.cluster = new Cluster(); + this.cluster.read(iprot); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // LOCATION + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.location = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // PRIMARY + if (field.type == org.apache.thrift.protocol.TType.BOOL) { + this.primary = iprot.readBool(); + setPrimaryIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // DATA_SYNCED + if (field.type == org.apache.thrift.protocol.TType.BOOL) { + this.dataSynced = iprot.readBool(); + setDataSyncedIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 5: // PARAMETERS + if (field.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map44 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map44.size); + for (int _i45 = 0; _i45 < _map44.size; ++_i45) + { + String _key46; // required + String _val47; // required + _key46 = iprot.readString(); + _val47 = iprot.readString(); + 
this.parameters.put(_key46, _val47); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.cluster != null) { + oprot.writeFieldBegin(CLUSTER_FIELD_DESC); + this.cluster.write(oprot); + oprot.writeFieldEnd(); + } + if (this.location != null) { + oprot.writeFieldBegin(LOCATION_FIELD_DESC); + oprot.writeString(this.location); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(PRIMARY_FIELD_DESC); + oprot.writeBool(this.primary); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(DATA_SYNCED_FIELD_DESC); + oprot.writeBool(this.dataSynced); + oprot.writeFieldEnd(); + if (this.parameters != null) { + oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); + for (Map.Entry _iter48 : this.parameters.entrySet()) + { + oprot.writeString(_iter48.getKey()); + oprot.writeString(_iter48.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ClusterStorageDescriptor("); + boolean first = true; + + sb.append("cluster:"); + if (this.cluster == null) { + sb.append("null"); + } else { + sb.append(this.cluster); + } + first = false; + if (!first) sb.append(", "); + sb.append("location:"); + if (this.location == null) { + sb.append("null"); + } else { + sb.append(this.location); + } + first = false; + if (!first) sb.append(", "); + sb.append("primary:"); + sb.append(this.primary); + first 
= false; + if (!first) sb.append(", "); + sb.append("dataSynced:"); + sb.append(this.dataSynced); + first = false; + if (!first) sb.append(", "); + sb.append("parameters:"); + if (this.parameters == null) { + sb.append("null"); + } else { + sb.append(this.parameters); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + +} + Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java (working copy) @@ -128,24 +128,24 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", 
org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.SD, new 
org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class))); - tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, + tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap); @@ -765,38 +765,38 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // VALUES if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list75 = iprot.readListBegin(); - 
this.values = new ArrayList(_list75.size); - for (int _i76 = 0; _i76 < _list75.size; ++_i76) + org.apache.thrift.protocol.TList _list89 = iprot.readListBegin(); + this.values = new ArrayList(_list89.size); + for (int _i90 = 0; _i90 < _list89.size; ++_i90) { - String _elem77; // required - _elem77 = iprot.readString(); - this.values.add(_elem77); + String _elem91; // required + _elem91 = iprot.readString(); + this.values.add(_elem91); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // DB_NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.dbName = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 3: // TABLE_NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.tableName = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -804,7 +804,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.createTime = iprot.readI32(); setCreateTimeIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -812,7 +812,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.lastAccessTime = iprot.readI32(); setLastAccessTimeIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -820,26 +820,26 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.sd = new StorageDescriptor(); this.sd.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 7: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map78 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map78.size); - for (int _i79 = 0; _i79 < _map78.size; ++_i79) + org.apache.thrift.protocol.TMap _map92 = 
iprot.readMapBegin(); + this.parameters = new HashMap(2*_map92.size); + for (int _i93 = 0; _i93 < _map92.size; ++_i93) { - String _key80; // required - String _val81; // required - _key80 = iprot.readString(); - _val81 = iprot.readString(); - this.parameters.put(_key80, _val81); + String _key94; // required + String _val95; // required + _key94 = iprot.readString(); + _val95 = iprot.readString(); + this.parameters.put(_key94, _val95); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -847,7 +847,7 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.privileges = new PrincipalPrivilegeSet(); this.privileges.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -868,9 +868,9 @@ oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.values.size())); - for (String _iter82 : this.values) + for (String _iter96 : this.values) { - oprot.writeString(_iter82); + oprot.writeString(_iter96); } oprot.writeListEnd(); } @@ -901,10 +901,10 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter83 : this.parameters.entrySet()) + for (Map.Entry _iter97 : this.parameters.entrySet()) { - oprot.writeString(_iter83.getKey()); - oprot.writeString(_iter83.getValue()); + oprot.writeString(_iter97.getKey()); + oprot.writeString(_iter97.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java (revision 1235046) +++ 
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java (working copy) @@ -100,13 +100,13 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.SERIALIZATION_LIB, new org.apache.thrift.meta_data.FieldMetaData("serializationLib", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.SERIALIZATION_LIB, new org.apache.thrift.meta_data.FieldMetaData("serializationLib", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SerDeInfo.class, 
metaDataMap); @@ -404,40 +404,40 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.name = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // SERIALIZATION_LIB if (field.type == org.apache.thrift.protocol.TType.STRING) { this.serializationLib = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 3: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map44 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map44.size); - for (int _i45 = 0; _i45 < _map44.size; ++_i45) + org.apache.thrift.protocol.TMap _map54 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map54.size); + for (int _i55 = 0; _i55 < _map54.size; ++_i55) { - String _key46; // required - String _val47; // required - _key46 = iprot.readString(); - _val47 = iprot.readString(); - this.parameters.put(_key46, _val47); + String _key56; // required + String _val57; // required + _key56 = iprot.readString(); + _val57 = iprot.readString(); + this.parameters.put(_key56, _val57); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -468,10 +468,10 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter48 : this.parameters.entrySet()) + for (Map.Entry _iter58 : this.parameters.entrySet()) { - oprot.writeString(_iter48.getKey()); - oprot.writeString(_iter48.getValue()); + oprot.writeString(_iter58.getKey()); + 
oprot.writeString(_iter58.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java (working copy) @@ -154,34 +154,34 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.OWNER, new org.apache.thrift.meta_data.FieldMetaData("owner", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.OWNER, new org.apache.thrift.meta_data.FieldMetaData("owner", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.CREATE_TIME, new 
org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.RETENTION, new org.apache.thrift.meta_data.FieldMetaData("retention", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.RETENTION, new org.apache.thrift.meta_data.FieldMetaData("retention", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class))); - tmpMap.put(_Fields.PARTITION_KEYS, new org.apache.thrift.meta_data.FieldMetaData("partitionKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.PARTITION_KEYS, new org.apache.thrift.meta_data.FieldMetaData("partitionKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); - tmpMap.put(_Fields.PARAMETERS, new 
org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.VIEW_ORIGINAL_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewOriginalText", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.VIEW_ORIGINAL_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewOriginalText", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); @@ -1105,28 +1105,28 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // TABLE_NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.tableName = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // DB_NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.dbName = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 3: // OWNER if (field.type == org.apache.thrift.protocol.TType.STRING) { this.owner = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1134,7 +1134,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.createTime = iprot.readI32(); setCreateTimeIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1142,7 +1142,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.lastAccessTime = iprot.readI32(); setLastAccessTimeIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1150,7 +1150,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.retention = iprot.readI32(); setRetentionIsSet(true); - } else { + } else { 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1158,65 +1158,65 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.sd = new StorageDescriptor(); this.sd.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 8: // PARTITION_KEYS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list66 = iprot.readListBegin(); - this.partitionKeys = new ArrayList(_list66.size); - for (int _i67 = 0; _i67 < _list66.size; ++_i67) + org.apache.thrift.protocol.TList _list80 = iprot.readListBegin(); + this.partitionKeys = new ArrayList(_list80.size); + for (int _i81 = 0; _i81 < _list80.size; ++_i81) { - FieldSchema _elem68; // required - _elem68 = new FieldSchema(); - _elem68.read(iprot); - this.partitionKeys.add(_elem68); + FieldSchema _elem82; // required + _elem82 = new FieldSchema(); + _elem82.read(iprot); + this.partitionKeys.add(_elem82); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 9: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map69 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map69.size); - for (int _i70 = 0; _i70 < _map69.size; ++_i70) + org.apache.thrift.protocol.TMap _map83 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map83.size); + for (int _i84 = 0; _i84 < _map83.size; ++_i84) { - String _key71; // required - String _val72; // required - _key71 = iprot.readString(); - _val72 = iprot.readString(); - this.parameters.put(_key71, _val72); + String _key85; // required + String _val86; // required + _key85 = iprot.readString(); + _val86 = iprot.readString(); + this.parameters.put(_key85, _val86); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 10: // VIEW_ORIGINAL_TEXT if 
(field.type == org.apache.thrift.protocol.TType.STRING) { this.viewOriginalText = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 11: // VIEW_EXPANDED_TEXT if (field.type == org.apache.thrift.protocol.TType.STRING) { this.viewExpandedText = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 12: // TABLE_TYPE if (field.type == org.apache.thrift.protocol.TType.STRING) { this.tableType = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1224,7 +1224,7 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.privileges = new PrincipalPrivilegeSet(); this.privileges.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1274,9 +1274,9 @@ oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.partitionKeys.size())); - for (FieldSchema _iter73 : this.partitionKeys) + for (FieldSchema _iter87 : this.partitionKeys) { - _iter73.write(oprot); + _iter87.write(oprot); } oprot.writeListEnd(); } @@ -1286,10 +1286,10 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter74 : this.parameters.entrySet()) + for (Map.Entry _iter88 : this.parameters.entrySet()) { - oprot.writeString(_iter74.getKey()); - oprot.writeString(_iter74.getValue()); + oprot.writeString(_iter88.getKey()); + oprot.writeString(_iter88.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java =================================================================== --- 
metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java (working copy) @@ -33,6 +33,7 @@ private static final org.apache.thrift.protocol.TField BUCKET_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("bucketCols", org.apache.thrift.protocol.TType.LIST, (short)8); private static final org.apache.thrift.protocol.TField SORT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("sortCols", org.apache.thrift.protocol.TType.LIST, (short)9); private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)10); + private static final org.apache.thrift.protocol.TField CLUSTER_STORAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("clusterStorage", org.apache.thrift.protocol.TType.LIST, (short)11); private List cols; // required private String location; // required @@ -44,6 +45,7 @@ private List bucketCols; // required private List sortCols; // required private Map parameters; // required + private List clusterStorage; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -56,7 +58,8 @@ SERDE_INFO((short)7, "serdeInfo"), BUCKET_COLS((short)8, "bucketCols"), SORT_COLS((short)9, "sortCols"), - PARAMETERS((short)10, "parameters"); + PARAMETERS((short)10, "parameters"), + CLUSTER_STORAGE((short)11, "clusterStorage"); private static final Map byName = new HashMap(); @@ -91,6 +94,8 @@ return SORT_COLS; case 10: // PARAMETERS return PARAMETERS; + case 11: // CLUSTER_STORAGE + return CLUSTER_STORAGE; default: return null; } @@ -138,31 +143,34 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COLS, new org.apache.thrift.meta_data.FieldMetaData("cols", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.COLS, new org.apache.thrift.meta_data.FieldMetaData("cols", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); - tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.LOCATION, new org.apache.thrift.meta_data.FieldMetaData("location", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.INPUT_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("inputFormat", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.INPUT_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("inputFormat", org.apache.thrift.TFieldRequirementType.DEFAULT, new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.OUTPUT_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("outputFormat", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.OUTPUT_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("outputFormat", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.COMPRESSED, new org.apache.thrift.meta_data.FieldMetaData("compressed", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.COMPRESSED, new org.apache.thrift.meta_data.FieldMetaData("compressed", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.NUM_BUCKETS, new org.apache.thrift.meta_data.FieldMetaData("numBuckets", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.NUM_BUCKETS, new org.apache.thrift.meta_data.FieldMetaData("numBuckets", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.SERDE_INFO, new org.apache.thrift.meta_data.FieldMetaData("serdeInfo", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.SERDE_INFO, new org.apache.thrift.meta_data.FieldMetaData("serdeInfo", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SerDeInfo.class))); - tmpMap.put(_Fields.BUCKET_COLS, new org.apache.thrift.meta_data.FieldMetaData("bucketCols", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.BUCKET_COLS, new org.apache.thrift.meta_data.FieldMetaData("bucketCols", org.apache.thrift.TFieldRequirementType.DEFAULT, + 
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.SORT_COLS, new org.apache.thrift.meta_data.FieldMetaData("sortCols", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + tmpMap.put(_Fields.SORT_COLS, new org.apache.thrift.meta_data.FieldMetaData("sortCols", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Order.class)))); - tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CLUSTER_STORAGE, new org.apache.thrift.meta_data.FieldMetaData("clusterStorage", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClusterStorageDescriptor.class)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(StorageDescriptor.class, metaDataMap); } @@ 
-253,6 +261,13 @@ } this.parameters = __this__parameters; } + if (other.isSetClusterStorage()) { + List __this__clusterStorage = new ArrayList(); + for (ClusterStorageDescriptor other_element : other.clusterStorage) { + __this__clusterStorage.add(new ClusterStorageDescriptor(other_element)); + } + this.clusterStorage = __this__clusterStorage; + } } public StorageDescriptor deepCopy() { @@ -273,6 +288,7 @@ this.bucketCols = null; this.sortCols = null; this.parameters = null; + this.clusterStorage = null; } public int getColsSize() { @@ -559,6 +575,44 @@ } } + public int getClusterStorageSize() { + return (this.clusterStorage == null) ? 0 : this.clusterStorage.size(); + } + + public java.util.Iterator getClusterStorageIterator() { + return (this.clusterStorage == null) ? null : this.clusterStorage.iterator(); + } + + public void addToClusterStorage(ClusterStorageDescriptor elem) { + if (this.clusterStorage == null) { + this.clusterStorage = new ArrayList(); + } + this.clusterStorage.add(elem); + } + + public List getClusterStorage() { + return this.clusterStorage; + } + + public void setClusterStorage(List clusterStorage) { + this.clusterStorage = clusterStorage; + } + + public void unsetClusterStorage() { + this.clusterStorage = null; + } + + /** Returns true if field clusterStorage is set (has been assigned a value) and false otherwise */ + public boolean isSetClusterStorage() { + return this.clusterStorage != null; + } + + public void setClusterStorageIsSet(boolean value) { + if (!value) { + this.clusterStorage = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case COLS: @@ -641,6 +695,14 @@ } break; + case CLUSTER_STORAGE: + if (value == null) { + unsetClusterStorage(); + } else { + setClusterStorage((List)value); + } + break; + } } @@ -676,6 +738,9 @@ case PARAMETERS: return getParameters(); + case CLUSTER_STORAGE: + return getClusterStorage(); + } throw new IllegalStateException(); } @@ -707,6 +772,8 @@ return 
isSetSortCols(); case PARAMETERS: return isSetParameters(); + case CLUSTER_STORAGE: + return isSetClusterStorage(); } throw new IllegalStateException(); } @@ -814,6 +881,15 @@ return false; } + boolean this_present_clusterStorage = true && this.isSetClusterStorage(); + boolean that_present_clusterStorage = true && that.isSetClusterStorage(); + if (this_present_clusterStorage || that_present_clusterStorage) { + if (!(this_present_clusterStorage && that_present_clusterStorage)) + return false; + if (!this.clusterStorage.equals(that.clusterStorage)) + return false; + } + return true; } @@ -930,6 +1006,16 @@ return lastComparison; } } + lastComparison = Boolean.valueOf(isSetClusterStorage()).compareTo(typedOther.isSetClusterStorage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetClusterStorage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.clusterStorage, typedOther.clusterStorage); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -943,46 +1029,46 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // COLS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list49 = iprot.readListBegin(); - this.cols = new ArrayList(_list49.size); - for (int _i50 = 0; _i50 < _list49.size; ++_i50) + org.apache.thrift.protocol.TList _list59 = iprot.readListBegin(); + this.cols = new ArrayList(_list59.size); + for (int _i60 = 0; _i60 < _list59.size; ++_i60) { - FieldSchema _elem51; // required - _elem51 = new FieldSchema(); - _elem51.read(iprot); - this.cols.add(_elem51); + FieldSchema _elem61; // required + _elem61 = new FieldSchema(); + _elem61.read(iprot); + this.cols.add(_elem61); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // 
LOCATION if (field.type == org.apache.thrift.protocol.TType.STRING) { this.location = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 3: // INPUT_FORMAT if (field.type == org.apache.thrift.protocol.TType.STRING) { this.inputFormat = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 4: // OUTPUT_FORMAT if (field.type == org.apache.thrift.protocol.TType.STRING) { this.outputFormat = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -990,7 +1076,7 @@ if (field.type == org.apache.thrift.protocol.TType.BOOL) { this.compressed = iprot.readBool(); setCompressedIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -998,7 +1084,7 @@ if (field.type == org.apache.thrift.protocol.TType.I32) { this.numBuckets = iprot.readI32(); setNumBucketsIsSet(true); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -1006,64 +1092,82 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.serdeInfo = new SerDeInfo(); this.serdeInfo.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 8: // BUCKET_COLS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list52 = iprot.readListBegin(); - this.bucketCols = new ArrayList(_list52.size); - for (int _i53 = 0; _i53 < _list52.size; ++_i53) + org.apache.thrift.protocol.TList _list62 = iprot.readListBegin(); + this.bucketCols = new ArrayList(_list62.size); + for (int _i63 = 0; _i63 < _list62.size; ++_i63) { - String _elem54; // required - _elem54 = iprot.readString(); - this.bucketCols.add(_elem54); + String _elem64; // required + _elem64 = iprot.readString(); + this.bucketCols.add(_elem64); } iprot.readListEnd(); } 
- } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 9: // SORT_COLS if (field.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list55 = iprot.readListBegin(); - this.sortCols = new ArrayList(_list55.size); - for (int _i56 = 0; _i56 < _list55.size; ++_i56) + org.apache.thrift.protocol.TList _list65 = iprot.readListBegin(); + this.sortCols = new ArrayList(_list65.size); + for (int _i66 = 0; _i66 < _list65.size; ++_i66) { - Order _elem57; // required - _elem57 = new Order(); - _elem57.read(iprot); - this.sortCols.add(_elem57); + Order _elem67; // required + _elem67 = new Order(); + _elem67.read(iprot); + this.sortCols.add(_elem67); } iprot.readListEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 10: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map58.size); - for (int _i59 = 0; _i59 < _map58.size; ++_i59) + org.apache.thrift.protocol.TMap _map68 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map68.size); + for (int _i69 = 0; _i69 < _map68.size; ++_i69) { - String _key60; // required - String _val61; // required - _key60 = iprot.readString(); - _val61 = iprot.readString(); - this.parameters.put(_key60, _val61); + String _key70; // required + String _val71; // required + _key70 = iprot.readString(); + _val71 = iprot.readString(); + this.parameters.put(_key70, _val71); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; + case 11: // CLUSTER_STORAGE + if (field.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list72 = iprot.readListBegin(); + this.clusterStorage = new ArrayList(_list72.size); + for (int _i73 = 0; _i73 < _list72.size; ++_i73) + { + ClusterStorageDescriptor 
_elem74; // required + _elem74 = new ClusterStorageDescriptor(); + _elem74.read(iprot); + this.clusterStorage.add(_elem74); + } + iprot.readListEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } @@ -1081,9 +1185,9 @@ oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.cols.size())); - for (FieldSchema _iter62 : this.cols) + for (FieldSchema _iter75 : this.cols) { - _iter62.write(oprot); + _iter75.write(oprot); } oprot.writeListEnd(); } @@ -1119,9 +1223,9 @@ oprot.writeFieldBegin(BUCKET_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, this.bucketCols.size())); - for (String _iter63 : this.bucketCols) + for (String _iter76 : this.bucketCols) { - oprot.writeString(_iter63); + oprot.writeString(_iter76); } oprot.writeListEnd(); } @@ -1131,9 +1235,9 @@ oprot.writeFieldBegin(SORT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.sortCols.size())); - for (Order _iter64 : this.sortCols) + for (Order _iter77 : this.sortCols) { - _iter64.write(oprot); + _iter77.write(oprot); } oprot.writeListEnd(); } @@ -1143,15 +1247,29 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter65 : this.parameters.entrySet()) + for (Map.Entry _iter78 : this.parameters.entrySet()) { - oprot.writeString(_iter65.getKey()); - oprot.writeString(_iter65.getValue()); + oprot.writeString(_iter78.getKey()); + oprot.writeString(_iter78.getValue()); } oprot.writeMapEnd(); } oprot.writeFieldEnd(); } + if (this.clusterStorage != null) { + if (isSetClusterStorage()) { 
+ oprot.writeFieldBegin(CLUSTER_STORAGE_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.clusterStorage.size())); + for (ClusterStorageDescriptor _iter79 : this.clusterStorage) + { + _iter79.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1232,6 +1350,16 @@ sb.append(this.parameters); } first = false; + if (isSetClusterStorage()) { + if (!first) sb.append(", "); + sb.append("clusterStorage:"); + if (this.clusterStorage == null) { + sb.append("null"); + } else { + sb.append(this.clusterStorage); + } + first = false; + } sb.append(")"); return sb.toString(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java (working copy) @@ -110,17 +110,17 @@ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", 
org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, + tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Database.class, metaDataMap); @@ -538,47 +538,47 @@ while (true) { field = iprot.readFieldBegin(); - if (field.type == org.apache.thrift.protocol.TType.STOP) { + if (field.type == 
org.apache.thrift.protocol.TType.STOP) { break; } switch (field.id) { case 1: // NAME if (field.type == org.apache.thrift.protocol.TType.STRING) { this.name = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 2: // DESCRIPTION if (field.type == org.apache.thrift.protocol.TType.STRING) { this.description = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 3: // LOCATION_URI if (field.type == org.apache.thrift.protocol.TType.STRING) { this.locationUri = iprot.readString(); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; case 4: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map39 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map39.size); - for (int _i40 = 0; _i40 < _map39.size; ++_i40) + org.apache.thrift.protocol.TMap _map49 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map49.size); + for (int _i50 = 0; _i50 < _map49.size; ++_i50) { - String _key41; // required - String _val42; // required - _key41 = iprot.readString(); - _val42 = iprot.readString(); - this.parameters.put(_key41, _val42); + String _key51; // required + String _val52; // required + _key51 = iprot.readString(); + _val52 = iprot.readString(); + this.parameters.put(_key51, _val52); } iprot.readMapEnd(); } - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -586,7 +586,7 @@ if (field.type == org.apache.thrift.protocol.TType.STRUCT) { this.privileges = new PrincipalPrivilegeSet(); this.privileges.read(iprot); - } else { + } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); } break; @@ -622,10 +622,10 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter43 : this.parameters.entrySet()) + for (Map.Entry _iter53 : this.parameters.entrySet()) { - oprot.writeString(_iter43.getKey()); - oprot.writeString(_iter43.getValue()); + oprot.writeString(_iter53.getKey()); + oprot.writeString(_iter53.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java (revision 1235046) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java (working copy) @@ -949,15 +949,15 @@ case 9: // PARAMETERS if (field.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map84 = iprot.readMapBegin(); - this.parameters = new HashMap(2*_map84.size); - for (int _i85 = 0; _i85 < _map84.size; ++_i85) + org.apache.thrift.protocol.TMap _map98 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map98.size); + for (int _i99 = 0; _i99 < _map98.size; ++_i99) { - String _key86; // required - String _val87; // required - _key86 = iprot.readString(); - _val87 = iprot.readString(); - this.parameters.put(_key86, _val87); + String _key100; // required + String _val101; // required + _key100 = iprot.readString(); + _val101 = iprot.readString(); + this.parameters.put(_key100, _val101); } iprot.readMapEnd(); } @@ -1026,10 +1026,10 @@ oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); - for (Map.Entry _iter88 : this.parameters.entrySet()) + for (Map.Entry _iter102 : this.parameters.entrySet()) { - oprot.writeString(_iter88.getKey()); - oprot.writeString(_iter88.getValue()); + oprot.writeString(_iter102.getKey()); + 
oprot.writeString(_iter102.getValue()); } oprot.writeMapEnd(); } Index: metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Cluster.java =================================================================== --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Cluster.java (revision 0) +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Cluster.java (revision 0) @@ -0,0 +1,713 @@ +/** + * Autogenerated by Thrift Compiler (0.7.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + */ +package org.apache.hadoop.hive.metastore.api; + +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Cluster implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Cluster"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField COMMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("comment", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)3); + private static final org.apache.thrift.protocol.TField LOCATION_URI_FIELD_DESC = new org.apache.thrift.protocol.TField("locationUri", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final 
org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)5); + + private String name; // required + private String comment; // required + private int createTime; // required + private String locationUri; // required + private Map parameters; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"), + COMMENT((short)2, "comment"), + CREATE_TIME((short)3, "createTime"), + LOCATION_URI((short)4, "locationUri"), + PARAMETERS((short)5, "parameters"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + case 2: // COMMENT + return COMMENT; + case 3: // CREATE_TIME + return CREATE_TIME; + case 4: // LOCATION_URI + return LOCATION_URI; + case 5: // PARAMETERS + return PARAMETERS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __CREATETIME_ISSET_ID = 0; + private BitSet __isset_bit_vector = new BitSet(1); + + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.COMMENT, new org.apache.thrift.meta_data.FieldMetaData("comment", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Cluster.class, metaDataMap); + } + + public Cluster() { + } + + public Cluster( + String name, + String comment, + int createTime, + String locationUri, + Map parameters) + { + this(); + this.name = name; + this.comment = comment; + this.createTime = createTime; + setCreateTimeIsSet(true); + this.locationUri = locationUri; + this.parameters = parameters; + } + + /** + * Performs a deep copy on other. + */ + public Cluster(Cluster other) { + __isset_bit_vector.clear(); + __isset_bit_vector.or(other.__isset_bit_vector); + if (other.isSetName()) { + this.name = other.name; + } + if (other.isSetComment()) { + this.comment = other.comment; + } + this.createTime = other.createTime; + if (other.isSetLocationUri()) { + this.locationUri = other.locationUri; + } + if (other.isSetParameters()) { + Map __this__parameters = new HashMap(); + for (Map.Entry other_element : other.parameters.entrySet()) { + + String other_element_key = other_element.getKey(); + String other_element_value = other_element.getValue(); + + String __this__parameters_copy_key = other_element_key; + + String __this__parameters_copy_value = other_element_value; + + __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value); + } + this.parameters = __this__parameters; + } + } + + public Cluster deepCopy() { + return new Cluster(this); + } + + @Override + public void clear() { + this.name = null; + this.comment = null; + setCreateTimeIsSet(false); + this.createTime = 0; + this.locationUri = null; + this.parameters = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = 
null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public String getComment() { + return this.comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public void unsetComment() { + this.comment = null; + } + + /** Returns true if field comment is set (has been assigned a value) and false otherwise */ + public boolean isSetComment() { + return this.comment != null; + } + + public void setCommentIsSet(boolean value) { + if (!value) { + this.comment = null; + } + } + + public int getCreateTime() { + return this.createTime; + } + + public void setCreateTime(int createTime) { + this.createTime = createTime; + setCreateTimeIsSet(true); + } + + public void unsetCreateTime() { + __isset_bit_vector.clear(__CREATETIME_ISSET_ID); + } + + /** Returns true if field createTime is set (has been assigned a value) and false otherwise */ + public boolean isSetCreateTime() { + return __isset_bit_vector.get(__CREATETIME_ISSET_ID); + } + + public void setCreateTimeIsSet(boolean value) { + __isset_bit_vector.set(__CREATETIME_ISSET_ID, value); + } + + public String getLocationUri() { + return this.locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } + + public void unsetLocationUri() { + this.locationUri = null; + } + + /** Returns true if field locationUri is set (has been assigned a value) and false otherwise */ + public boolean isSetLocationUri() { + return this.locationUri != null; + } + + public void setLocationUriIsSet(boolean value) { + if (!value) { + this.locationUri = null; + } + } + + public int getParametersSize() { + return (this.parameters == null) ? 
0 : this.parameters.size(); + } + + public void putToParameters(String key, String val) { + if (this.parameters == null) { + this.parameters = new HashMap(); + } + this.parameters.put(key, val); + } + + public Map getParameters() { + return this.parameters; + } + + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + public void unsetParameters() { + this.parameters = null; + } + + /** Returns true if field parameters is set (has been assigned a value) and false otherwise */ + public boolean isSetParameters() { + return this.parameters != null; + } + + public void setParametersIsSet(boolean value) { + if (!value) { + this.parameters = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case COMMENT: + if (value == null) { + unsetComment(); + } else { + setComment((String)value); + } + break; + + case CREATE_TIME: + if (value == null) { + unsetCreateTime(); + } else { + setCreateTime((Integer)value); + } + break; + + case LOCATION_URI: + if (value == null) { + unsetLocationUri(); + } else { + setLocationUri((String)value); + } + break; + + case PARAMETERS: + if (value == null) { + unsetParameters(); + } else { + setParameters((Map)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + case COMMENT: + return getComment(); + + case CREATE_TIME: + return Integer.valueOf(getCreateTime()); + + case LOCATION_URI: + return getLocationUri(); + + case PARAMETERS: + return getParameters(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + case COMMENT: 
+ return isSetComment(); + case CREATE_TIME: + return isSetCreateTime(); + case LOCATION_URI: + return isSetLocationUri(); + case PARAMETERS: + return isSetParameters(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof Cluster) + return this.equals((Cluster)that); + return false; + } + + public boolean equals(Cluster that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_comment = true && this.isSetComment(); + boolean that_present_comment = true && that.isSetComment(); + if (this_present_comment || that_present_comment) { + if (!(this_present_comment && that_present_comment)) + return false; + if (!this.comment.equals(that.comment)) + return false; + } + + boolean this_present_createTime = true; + boolean that_present_createTime = true; + if (this_present_createTime || that_present_createTime) { + if (!(this_present_createTime && that_present_createTime)) + return false; + if (this.createTime != that.createTime) + return false; + } + + boolean this_present_locationUri = true && this.isSetLocationUri(); + boolean that_present_locationUri = true && that.isSetLocationUri(); + if (this_present_locationUri || that_present_locationUri) { + if (!(this_present_locationUri && that_present_locationUri)) + return false; + if (!this.locationUri.equals(that.locationUri)) + return false; + } + + boolean this_present_parameters = true && this.isSetParameters(); + boolean that_present_parameters = true && that.isSetParameters(); + if (this_present_parameters || that_present_parameters) { + if (!(this_present_parameters && that_present_parameters)) + return false; + if 
(!this.parameters.equals(that.parameters)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + public int compareTo(Cluster other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + Cluster typedOther = (Cluster)other; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(typedOther.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, typedOther.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetComment()).compareTo(typedOther.isSetComment()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetComment()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comment, typedOther.comment); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(typedOther.isSetCreateTime()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCreateTime()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, typedOther.createTime); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetLocationUri()).compareTo(typedOther.isSetLocationUri()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLocationUri()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.locationUri, typedOther.locationUri); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParameters()).compareTo(typedOther.isSetParameters()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParameters()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, typedOther.parameters); + if (lastComparison 
!= 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField field; + iprot.readStructBegin(); + while (true) + { + field = iprot.readFieldBegin(); + if (field.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (field.id) { + case 1: // NAME + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.name = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 2: // COMMENT + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.comment = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 3: // CREATE_TIME + if (field.type == org.apache.thrift.protocol.TType.I32) { + this.createTime = iprot.readI32(); + setCreateTimeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 4: // LOCATION_URI + if (field.type == org.apache.thrift.protocol.TType.STRING) { + this.locationUri = iprot.readString(); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + case 5: // PARAMETERS + if (field.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map39 = iprot.readMapBegin(); + this.parameters = new HashMap(2*_map39.size); + for (int _i40 = 0; _i40 < _map39.size; ++_i40) + { + String _key41; // required + String _val42; // required + _key41 = iprot.readString(); + _val42 = iprot.readString(); + this.parameters.put(_key41, _val42); + } + iprot.readMapEnd(); + } + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); + } + iprot.readFieldEnd(); + } 
+ iprot.readStructEnd(); + validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (this.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(this.name); + oprot.writeFieldEnd(); + } + if (this.comment != null) { + oprot.writeFieldBegin(COMMENT_FIELD_DESC); + oprot.writeString(this.comment); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC); + oprot.writeI32(this.createTime); + oprot.writeFieldEnd(); + if (this.locationUri != null) { + oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC); + oprot.writeString(this.locationUri); + oprot.writeFieldEnd(); + } + if (this.parameters != null) { + oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, this.parameters.size())); + for (Map.Entry _iter43 : this.parameters.entrySet()) + { + oprot.writeString(_iter43.getKey()); + oprot.writeString(_iter43.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Cluster("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (!first) sb.append(", "); + sb.append("comment:"); + if (this.comment == null) { + sb.append("null"); + } else { + sb.append(this.comment); + } + first = false; + if (!first) sb.append(", "); + sb.append("createTime:"); + sb.append(this.createTime); + first = false; + if (!first) sb.append(", "); + sb.append("locationUri:"); + if (this.locationUri == null) { + sb.append("null"); + } else { + sb.append(this.locationUri); + } + first = false; + if (!first) sb.append(", "); + sb.append("parameters:"); + if 
(this.parameters == null) { + sb.append("null"); + } else { + sb.append(this.parameters); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + +} + Index: metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php =================================================================== --- metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (revision 1235046) +++ metastore/src/gen/thrift/gen-php/hive_metastore/ThriftHiveMetastore.php (working copy) @@ -24,6 +24,7 @@ public function get_schema($db_name, $table_name); public function create_table($tbl); public function drop_table($dbname, $name, $deleteData); + public function drop_table_on_cluster($cluster_name, $dbname, $name, $deleteData); public function get_tables($db_name, $pattern); public function get_all_tables($db_name); public function get_table($dbname, $tbl_name); @@ -35,6 +36,7 @@ public function append_partition($db_name, $tbl_name, $part_vals); public function append_partition_by_name($db_name, $tbl_name, $part_name); public function drop_partition($db_name, $tbl_name, $part_vals, 
$deleteData); + public function drop_partition_on_cluster($cluster_name, $db_name, $tbl_name, $part_vals, $deleteData); public function drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData); public function get_partition($db_name, $tbl_name, $part_vals); public function get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names); @@ -60,6 +62,11 @@ public function get_index_by_name($db_name, $tbl_name, $index_name); public function get_indexes($db_name, $tbl_name, $max_indexes); public function get_index_names($db_name, $tbl_name, $max_indexes); + public function create_cluster($cluster); + public function drop_cluster($cluster_name); + public function get_cluster($cluster_name); + public function list_clusters(); + public function alter_cluster($cluster_name, $cluster); public function create_role($role); public function drop_role($role_name); public function get_role_names(); @@ -882,6 +889,66 @@ return; } + public function drop_table_on_cluster($cluster_name, $dbname, $name, $deleteData) + { + $this->send_drop_table_on_cluster($cluster_name, $dbname, $name, $deleteData); + $this->recv_drop_table_on_cluster(); + } + + public function send_drop_table_on_cluster($cluster_name, $dbname, $name, $deleteData) + { + $args = new ThriftHiveMetastore_drop_table_on_cluster_args(); + $args->cluster_name = $cluster_name; + $args->dbname = $dbname; + $args->name = $name; + $args->deleteData = $deleteData; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_table_on_cluster', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_table_on_cluster', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + 
public function recv_drop_table_on_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_drop_table_on_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_drop_table_on_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + public function get_tables($db_name, $pattern) { $this->send_get_tables($db_name, $pattern); @@ -1532,6 +1599,70 @@ throw new Exception("drop_partition failed: unknown result"); } + public function drop_partition_on_cluster($cluster_name, $db_name, $tbl_name, $part_vals, $deleteData) + { + $this->send_drop_partition_on_cluster($cluster_name, $db_name, $tbl_name, $part_vals, $deleteData); + return $this->recv_drop_partition_on_cluster(); + } + + public function send_drop_partition_on_cluster($cluster_name, $db_name, $tbl_name, $part_vals, $deleteData) + { + $args = new ThriftHiveMetastore_drop_partition_on_cluster_args(); + $args->cluster_name = $cluster_name; + $args->db_name = $db_name; + $args->tbl_name = $tbl_name; + $args->part_vals = $part_vals; + $args->deleteData = $deleteData; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_partition_on_cluster', TMessageType::CALL, $args, 
$this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_partition_on_cluster', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_partition_on_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_drop_partition_on_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_drop_partition_on_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + throw new Exception("drop_partition_on_cluster failed: unknown result"); + } + public function drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData) { $this->send_drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData); @@ -3017,6 +3148,291 @@ throw new Exception("get_index_names failed: unknown result"); } + public function create_cluster($cluster) + { + $this->send_create_cluster($cluster); + $this->recv_create_cluster(); + } + + public function send_create_cluster($cluster) + { + $args = new ThriftHiveMetastore_create_cluster_args(); + $args->cluster = $cluster; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && 
function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_cluster', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_cluster', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_create_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_create_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_create_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + + public function drop_cluster($cluster_name) + { + $this->send_drop_cluster($cluster_name); + $this->recv_drop_cluster(); + } + + public function send_drop_cluster($cluster_name) + { + $args = new ThriftHiveMetastore_drop_cluster_args(); + $args->cluster_name = $cluster_name; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_cluster', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_cluster', 
TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_drop_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_drop_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + + public function get_cluster($cluster_name) + { + $this->send_get_cluster($cluster_name); + return $this->recv_get_cluster(); + } + + public function send_get_cluster($cluster_name) + { + $args = new ThriftHiveMetastore_get_cluster_args(); + $args->cluster_name = $cluster_name; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_cluster', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_cluster', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && 
function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_get_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_get_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new Exception("get_cluster failed: unknown result"); + } + + public function list_clusters() + { + $this->send_list_clusters(); + return $this->recv_list_clusters(); + } + + public function send_list_clusters() + { + $args = new ThriftHiveMetastore_list_clusters_args(); + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'list_clusters', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('list_clusters', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_list_clusters() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_list_clusters_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, 
$mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new ThriftHiveMetastore_list_clusters_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new Exception("list_clusters failed: unknown result"); + } + + public function alter_cluster($cluster_name, $cluster) + { + $this->send_alter_cluster($cluster_name, $cluster); + $this->recv_alter_cluster(); + } + + public function send_alter_cluster($cluster_name, $cluster) + { + $args = new ThriftHiveMetastore_alter_cluster_args(); + $args->cluster_name = $cluster_name; + $args->cluster = $cluster; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_cluster', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_cluster', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_alter_cluster() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 'ThriftHiveMetastore_alter_cluster_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new 
ThriftHiveMetastore_alter_cluster_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + return; + } + public function create_role($role) { $this->send_create_role($role); @@ -4533,14 +4949,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size174 = 0; - $_etype177 = 0; - $xfer += $input->readListBegin($_etype177, $_size174); - for ($_i178 = 0; $_i178 < $_size174; ++$_i178) + $_size199 = 0; + $_etype202 = 0; + $xfer += $input->readListBegin($_etype202, $_size199); + for ($_i203 = 0; $_i203 < $_size199; ++$_i203) { - $elem179 = null; - $xfer += $input->readString($elem179); - $this->success []= $elem179; + $elem204 = null; + $xfer += $input->readString($elem204); + $this->success []= $elem204; } $xfer += $input->readListEnd(); } else { @@ -4576,9 +4992,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter180) + foreach ($this->success as $iter205) { - $xfer += $output->writeString($iter180); + $xfer += $output->writeString($iter205); } } $output->writeListEnd(); @@ -4703,14 +5119,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size181 = 0; - $_etype184 = 0; - $xfer += $input->readListBegin($_etype184, $_size181); - for ($_i185 = 0; $_i185 < $_size181; ++$_i185) + $_size206 = 0; + $_etype209 = 0; + $xfer += $input->readListBegin($_etype209, $_size206); + for ($_i210 = 0; $_i210 < $_size206; ++$_i210) { - $elem186 = null; - $xfer += $input->readString($elem186); - $this->success []= $elem186; + $elem211 = null; + $xfer += $input->readString($elem211); + $this->success []= $elem211; } $xfer += $input->readListEnd(); } else { @@ -4746,9 +5162,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach 
($this->success as $iter187) + foreach ($this->success as $iter212) { - $xfer += $output->writeString($iter187); + $xfer += $output->writeString($iter212); } } $output->writeListEnd(); @@ -5689,18 +6105,18 @@ case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size188 = 0; - $_ktype189 = 0; - $_vtype190 = 0; - $xfer += $input->readMapBegin($_ktype189, $_vtype190, $_size188); - for ($_i192 = 0; $_i192 < $_size188; ++$_i192) + $_size213 = 0; + $_ktype214 = 0; + $_vtype215 = 0; + $xfer += $input->readMapBegin($_ktype214, $_vtype215, $_size213); + for ($_i217 = 0; $_i217 < $_size213; ++$_i217) { - $key193 = ''; - $val194 = new Type(); - $xfer += $input->readString($key193); - $val194 = new Type(); - $xfer += $val194->read($input); - $this->success[$key193] = $val194; + $key218 = ''; + $val219 = new Type(); + $xfer += $input->readString($key218); + $val219 = new Type(); + $xfer += $val219->read($input); + $this->success[$key218] = $val219; } $xfer += $input->readMapEnd(); } else { @@ -5736,10 +6152,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter195 => $viter196) + foreach ($this->success as $kiter220 => $viter221) { - $xfer += $output->writeString($kiter195); - $xfer += $viter196->write($output); + $xfer += $output->writeString($kiter220); + $xfer += $viter221->write($output); } } $output->writeMapEnd(); @@ -5925,15 +6341,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size197 = 0; - $_etype200 = 0; - $xfer += $input->readListBegin($_etype200, $_size197); - for ($_i201 = 0; $_i201 < $_size197; ++$_i201) + $_size222 = 0; + $_etype225 = 0; + $xfer += $input->readListBegin($_etype225, $_size222); + for ($_i226 = 0; $_i226 < $_size222; ++$_i226) { - $elem202 = null; - $elem202 = new FieldSchema(); - $xfer += $elem202->read($input); - $this->success []= $elem202; + $elem227 = null; + $elem227 = new FieldSchema(); + $xfer += $elem227->read($input); + $this->success 
[]= $elem227; } $xfer += $input->readListEnd(); } else { @@ -5985,9 +6401,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter203) + foreach ($this->success as $iter228) { - $xfer += $iter203->write($output); + $xfer += $iter228->write($output); } } $output->writeListEnd(); @@ -6183,15 +6599,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size204 = 0; - $_etype207 = 0; - $xfer += $input->readListBegin($_etype207, $_size204); - for ($_i208 = 0; $_i208 < $_size204; ++$_i208) + $_size229 = 0; + $_etype232 = 0; + $xfer += $input->readListBegin($_etype232, $_size229); + for ($_i233 = 0; $_i233 < $_size229; ++$_i233) { - $elem209 = null; - $elem209 = new FieldSchema(); - $xfer += $elem209->read($input); - $this->success []= $elem209; + $elem234 = null; + $elem234 = new FieldSchema(); + $xfer += $elem234->read($input); + $this->success []= $elem234; } $xfer += $input->readListEnd(); } else { @@ -6243,9 +6659,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter210) + foreach ($this->success as $iter235) { - $xfer += $iter210->write($output); + $xfer += $iter235->write($output); } } $output->writeListEnd(); @@ -6699,6 +7115,256 @@ } +class ThriftHiveMetastore_drop_table_on_cluster_args { + static $_TSPEC; + + public $cluster_name = null; + public $dbname = null; + public $name = null; + public $deleteData = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster_name'])) { + $this->cluster_name = $vals['cluster_name']; + } + if (isset($vals['dbname'])) { + 
$this->dbname = $vals['dbname']; + } + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_table_on_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cluster_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_on_cluster_args'); + if ($this->cluster_name !== null) { + $xfer += $output->writeFieldBegin('cluster_name', TType::STRING, 1); + $xfer += $output->writeString($this->cluster_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->dbname !== null) { + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 2); + $xfer += $output->writeString($this->dbname); + $xfer += $output->writeFieldEnd(); + } + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 3); + $xfer += $output->writeString($this->name); + $xfer += 
$output->writeFieldEnd(); + } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 4); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_table_on_cluster_result { + static $_TSPEC; + + public $o1 = null; + public $o2 = null; + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'InvalidOperationException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_table_on_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new InvalidOperationException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + 
default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_on_cluster_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_tables_args { static $_TSPEC; @@ -6847,14 +7513,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size211 = 0; - $_etype214 = 0; - $xfer += $input->readListBegin($_etype214, $_size211); - for ($_i215 = 0; $_i215 < $_size211; ++$_i215) + $_size236 = 0; + $_etype239 = 0; + $xfer += $input->readListBegin($_etype239, $_size236); + for ($_i240 = 0; $_i240 < $_size236; ++$_i240) { - $elem216 = null; - $xfer += $input->readString($elem216); - $this->success []= $elem216; + $elem241 = null; + $xfer += $input->readString($elem241); + $this->success []= $elem241; } $xfer += $input->readListEnd(); } else { @@ -6890,9 +7556,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter217) + foreach ($this->success as $iter242) { - $xfer += $output->writeString($iter217); + $xfer += $output->writeString($iter242); } } $output->writeListEnd(); @@ -7039,14 +7705,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size218 = 0; - $_etype221 = 0; - $xfer += 
$input->readListBegin($_etype221, $_size218); - for ($_i222 = 0; $_i222 < $_size218; ++$_i222) + $_size243 = 0; + $_etype246 = 0; + $xfer += $input->readListBegin($_etype246, $_size243); + for ($_i247 = 0; $_i247 < $_size243; ++$_i247) { - $elem223 = null; - $xfer += $input->readString($elem223); - $this->success []= $elem223; + $elem248 = null; + $xfer += $input->readString($elem248); + $this->success []= $elem248; } $xfer += $input->readListEnd(); } else { @@ -7082,9 +7748,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter224) + foreach ($this->success as $iter249) { - $xfer += $output->writeString($iter224); + $xfer += $output->writeString($iter249); } } $output->writeListEnd(); @@ -7378,14 +8044,14 @@ case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size225 = 0; - $_etype228 = 0; - $xfer += $input->readListBegin($_etype228, $_size225); - for ($_i229 = 0; $_i229 < $_size225; ++$_i229) + $_size250 = 0; + $_etype253 = 0; + $xfer += $input->readListBegin($_etype253, $_size250); + for ($_i254 = 0; $_i254 < $_size250; ++$_i254) { - $elem230 = null; - $xfer += $input->readString($elem230); - $this->tbl_names []= $elem230; + $elem255 = null; + $xfer += $input->readString($elem255); + $this->tbl_names []= $elem255; } $xfer += $input->readListEnd(); } else { @@ -7418,9 +8084,9 @@ { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter231) + foreach ($this->tbl_names as $iter256) { - $xfer += $output->writeString($iter231); + $xfer += $output->writeString($iter256); } } $output->writeListEnd(); @@ -7509,15 +8175,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size232 = 0; - $_etype235 = 0; - $xfer += $input->readListBegin($_etype235, $_size232); - for ($_i236 = 0; $_i236 < $_size232; ++$_i236) + $_size257 = 0; + $_etype260 = 0; + $xfer += $input->readListBegin($_etype260, $_size257); + for ($_i261 = 0; $_i261 < $_size257; 
++$_i261) { - $elem237 = null; - $elem237 = new Table(); - $xfer += $elem237->read($input); - $this->success []= $elem237; + $elem262 = null; + $elem262 = new Table(); + $xfer += $elem262->read($input); + $this->success []= $elem262; } $xfer += $input->readListEnd(); } else { @@ -7569,9 +8235,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter238) + foreach ($this->success as $iter263) { - $xfer += $iter238->write($output); + $xfer += $iter263->write($output); } } $output->writeListEnd(); @@ -7786,14 +8452,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size239 = 0; - $_etype242 = 0; - $xfer += $input->readListBegin($_etype242, $_size239); - for ($_i243 = 0; $_i243 < $_size239; ++$_i243) + $_size264 = 0; + $_etype267 = 0; + $xfer += $input->readListBegin($_etype267, $_size264); + for ($_i268 = 0; $_i268 < $_size264; ++$_i268) { - $elem244 = null; - $xfer += $input->readString($elem244); - $this->success []= $elem244; + $elem269 = null; + $xfer += $input->readString($elem269); + $this->success []= $elem269; } $xfer += $input->readListEnd(); } else { @@ -7845,9 +8511,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter245) + foreach ($this->success as $iter270) { - $xfer += $output->writeString($iter245); + $xfer += $output->writeString($iter270); } } $output->writeListEnd(); @@ -8357,15 +9023,15 @@ case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size246 = 0; - $_etype249 = 0; - $xfer += $input->readListBegin($_etype249, $_size246); - for ($_i250 = 0; $_i250 < $_size246; ++$_i250) + $_size271 = 0; + $_etype274 = 0; + $xfer += $input->readListBegin($_etype274, $_size271); + for ($_i275 = 0; $_i275 < $_size271; ++$_i275) { - $elem251 = null; - $elem251 = new Partition(); - $xfer += $elem251->read($input); - $this->new_parts []= $elem251; + $elem276 = null; + $elem276 = new Partition(); + $xfer += 
$elem276->read($input); + $this->new_parts []= $elem276; } $xfer += $input->readListEnd(); } else { @@ -8393,9 +9059,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter252) + foreach ($this->new_parts as $iter277) { - $xfer += $iter252->write($output); + $xfer += $iter277->write($output); } } $output->writeListEnd(); @@ -8624,14 +9290,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size253 = 0; - $_etype256 = 0; - $xfer += $input->readListBegin($_etype256, $_size253); - for ($_i257 = 0; $_i257 < $_size253; ++$_i257) + $_size278 = 0; + $_etype281 = 0; + $xfer += $input->readListBegin($_etype281, $_size278); + for ($_i282 = 0; $_i282 < $_size278; ++$_i282) { - $elem258 = null; - $xfer += $input->readString($elem258); - $this->part_vals []= $elem258; + $elem283 = null; + $xfer += $input->readString($elem283); + $this->part_vals []= $elem283; } $xfer += $input->readListEnd(); } else { @@ -8669,9 +9335,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter259) + foreach ($this->part_vals as $iter284) { - $xfer += $output->writeString($iter259); + $xfer += $output->writeString($iter284); } } $output->writeListEnd(); @@ -9168,14 +9834,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size260 = 0; - $_etype263 = 0; - $xfer += $input->readListBegin($_etype263, $_size260); - for ($_i264 = 0; $_i264 < $_size260; ++$_i264) + $_size285 = 0; + $_etype288 = 0; + $xfer += $input->readListBegin($_etype288, $_size285); + for ($_i289 = 0; $_i289 < $_size285; ++$_i289) { - $elem265 = null; - $xfer += $input->readString($elem265); - $this->part_vals []= $elem265; + $elem290 = null; + $xfer += $input->readString($elem290); + $this->part_vals []= $elem290; } $xfer += $input->readListEnd(); } else { @@ -9220,9 +9886,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as 
$iter266) + foreach ($this->part_vals as $iter291) { - $xfer += $output->writeString($iter266); + $xfer += $output->writeString($iter291); } } $output->writeListEnd(); @@ -9357,6 +10023,322 @@ } +class ThriftHiveMetastore_drop_partition_on_cluster_args { + static $_TSPEC; + + public $cluster_name = null; + public $db_name = null; + public $tbl_name = null; + public $part_vals = null; + public $deleteData = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'part_vals', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 5 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster_name'])) { + $this->cluster_name = $vals['cluster_name']; + } + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['part_vals'])) { + $this->part_vals = $vals['part_vals']; + } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_partition_on_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cluster_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::LST) { + $this->part_vals = array(); + $_size292 = 0; + $_etype295 = 0; + $xfer += $input->readListBegin($_etype295, $_size292); + for ($_i296 = 0; $_i296 < $_size292; ++$_i296) + { + $elem297 = null; + $xfer += $input->readString($elem297); + $this->part_vals []= $elem297; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_partition_on_cluster_args'); + if ($this->cluster_name !== null) { + $xfer += $output->writeFieldBegin('cluster_name', TType::STRING, 1); + $xfer += $output->writeString($this->cluster_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->part_vals !== null) { + if (!is_array($this->part_vals)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('part_vals', TType::LST, 4); + { + $output->writeListBegin(TType::STRING, count($this->part_vals)); + { + foreach 
($this->part_vals as $iter298) + { + $xfer += $output->writeString($iter298); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 5); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_partition_on_cluster_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + public $o2 = null; + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::BOOL, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'InvalidOperationException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_partition_on_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new NoSuchObjectException(); + 
$xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new InvalidOperationException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_partition_on_cluster_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::BOOL, 0); + $xfer += $output->writeBool($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_drop_partition_by_name_args { static $_TSPEC; @@ -9682,14 +10664,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size267 = 0; - $_etype270 = 0; - $xfer += $input->readListBegin($_etype270, $_size267); - for ($_i271 = 0; $_i271 < $_size267; ++$_i271) + $_size299 = 0; + $_etype302 = 0; + $xfer += $input->readListBegin($_etype302, $_size299); + for ($_i303 = 0; $_i303 < $_size299; ++$_i303) { - $elem272 = null; - 
$xfer += $input->readString($elem272); - $this->part_vals []= $elem272; + $elem304 = null; + $xfer += $input->readString($elem304); + $this->part_vals []= $elem304; } $xfer += $input->readListEnd(); } else { @@ -9727,9 +10709,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter273) + foreach ($this->part_vals as $iter305) { - $xfer += $output->writeString($iter273); + $xfer += $output->writeString($iter305); } } $output->writeListEnd(); @@ -9961,14 +10943,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size274 = 0; - $_etype277 = 0; - $xfer += $input->readListBegin($_etype277, $_size274); - for ($_i278 = 0; $_i278 < $_size274; ++$_i278) + $_size306 = 0; + $_etype309 = 0; + $xfer += $input->readListBegin($_etype309, $_size306); + for ($_i310 = 0; $_i310 < $_size306; ++$_i310) { - $elem279 = null; - $xfer += $input->readString($elem279); - $this->part_vals []= $elem279; + $elem311 = null; + $xfer += $input->readString($elem311); + $this->part_vals []= $elem311; } $xfer += $input->readListEnd(); } else { @@ -9985,14 +10967,14 @@ case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size280 = 0; - $_etype283 = 0; - $xfer += $input->readListBegin($_etype283, $_size280); - for ($_i284 = 0; $_i284 < $_size280; ++$_i284) + $_size312 = 0; + $_etype315 = 0; + $xfer += $input->readListBegin($_etype315, $_size312); + for ($_i316 = 0; $_i316 < $_size312; ++$_i316) { - $elem285 = null; - $xfer += $input->readString($elem285); - $this->group_names []= $elem285; + $elem317 = null; + $xfer += $input->readString($elem317); + $this->group_names []= $elem317; } $xfer += $input->readListEnd(); } else { @@ -10030,9 +11012,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter286) + foreach ($this->part_vals as $iter318) { - $xfer += $output->writeString($iter286); + $xfer += $output->writeString($iter318); } } 
$output->writeListEnd(); @@ -10052,9 +11034,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter287) + foreach ($this->group_names as $iter319) { - $xfer += $output->writeString($iter287); + $xfer += $output->writeString($iter319); } } $output->writeListEnd(); @@ -10600,15 +11582,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size288 = 0; - $_etype291 = 0; - $xfer += $input->readListBegin($_etype291, $_size288); - for ($_i292 = 0; $_i292 < $_size288; ++$_i292) + $_size320 = 0; + $_etype323 = 0; + $xfer += $input->readListBegin($_etype323, $_size320); + for ($_i324 = 0; $_i324 < $_size320; ++$_i324) { - $elem293 = null; - $elem293 = new Partition(); - $xfer += $elem293->read($input); - $this->success []= $elem293; + $elem325 = null; + $elem325 = new Partition(); + $xfer += $elem325->read($input); + $this->success []= $elem325; } $xfer += $input->readListEnd(); } else { @@ -10652,9 +11634,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter294) + foreach ($this->success as $iter326) { - $xfer += $iter294->write($output); + $xfer += $iter326->write($output); } } $output->writeListEnd(); @@ -10785,14 +11767,14 @@ case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size295 = 0; - $_etype298 = 0; - $xfer += $input->readListBegin($_etype298, $_size295); - for ($_i299 = 0; $_i299 < $_size295; ++$_i299) + $_size327 = 0; + $_etype330 = 0; + $xfer += $input->readListBegin($_etype330, $_size327); + for ($_i331 = 0; $_i331 < $_size327; ++$_i331) { - $elem300 = null; - $xfer += $input->readString($elem300); - $this->group_names []= $elem300; + $elem332 = null; + $xfer += $input->readString($elem332); + $this->group_names []= $elem332; } $xfer += $input->readListEnd(); } else { @@ -10840,9 +11822,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter301) + foreach 
($this->group_names as $iter333) { - $xfer += $output->writeString($iter301); + $xfer += $output->writeString($iter333); } } $output->writeListEnd(); @@ -10922,15 +11904,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size302 = 0; - $_etype305 = 0; - $xfer += $input->readListBegin($_etype305, $_size302); - for ($_i306 = 0; $_i306 < $_size302; ++$_i306) + $_size334 = 0; + $_etype337 = 0; + $xfer += $input->readListBegin($_etype337, $_size334); + for ($_i338 = 0; $_i338 < $_size334; ++$_i338) { - $elem307 = null; - $elem307 = new Partition(); - $xfer += $elem307->read($input); - $this->success []= $elem307; + $elem339 = null; + $elem339 = new Partition(); + $xfer += $elem339->read($input); + $this->success []= $elem339; } $xfer += $input->readListEnd(); } else { @@ -10974,9 +11956,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter308) + foreach ($this->success as $iter340) { - $xfer += $iter308->write($output); + $xfer += $iter340->write($output); } } $output->writeListEnd(); @@ -11168,14 +12150,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size309 = 0; - $_etype312 = 0; - $xfer += $input->readListBegin($_etype312, $_size309); - for ($_i313 = 0; $_i313 < $_size309; ++$_i313) + $_size341 = 0; + $_etype344 = 0; + $xfer += $input->readListBegin($_etype344, $_size341); + for ($_i345 = 0; $_i345 < $_size341; ++$_i345) { - $elem314 = null; - $xfer += $input->readString($elem314); - $this->success []= $elem314; + $elem346 = null; + $xfer += $input->readString($elem346); + $this->success []= $elem346; } $xfer += $input->readListEnd(); } else { @@ -11211,9 +12193,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter315) + foreach ($this->success as $iter347) { - $xfer += $output->writeString($iter315); + $xfer += $output->writeString($iter347); } } $output->writeListEnd(); @@ -11317,14 +12299,14 @@ case 3: if ($ftype == 
TType::LST) { $this->part_vals = array(); - $_size316 = 0; - $_etype319 = 0; - $xfer += $input->readListBegin($_etype319, $_size316); - for ($_i320 = 0; $_i320 < $_size316; ++$_i320) + $_size348 = 0; + $_etype351 = 0; + $xfer += $input->readListBegin($_etype351, $_size348); + for ($_i352 = 0; $_i352 < $_size348; ++$_i352) { - $elem321 = null; - $xfer += $input->readString($elem321); - $this->part_vals []= $elem321; + $elem353 = null; + $xfer += $input->readString($elem353); + $this->part_vals []= $elem353; } $xfer += $input->readListEnd(); } else { @@ -11369,9 +12351,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter322) + foreach ($this->part_vals as $iter354) { - $xfer += $output->writeString($iter322); + $xfer += $output->writeString($iter354); } } $output->writeListEnd(); @@ -11456,15 +12438,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size323 = 0; - $_etype326 = 0; - $xfer += $input->readListBegin($_etype326, $_size323); - for ($_i327 = 0; $_i327 < $_size323; ++$_i327) + $_size355 = 0; + $_etype358 = 0; + $xfer += $input->readListBegin($_etype358, $_size355); + for ($_i359 = 0; $_i359 < $_size355; ++$_i359) { - $elem328 = null; - $elem328 = new Partition(); - $xfer += $elem328->read($input); - $this->success []= $elem328; + $elem360 = null; + $elem360 = new Partition(); + $xfer += $elem360->read($input); + $this->success []= $elem360; } $xfer += $input->readListEnd(); } else { @@ -11508,9 +12490,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter329) + foreach ($this->success as $iter361) { - $xfer += $iter329->write($output); + $xfer += $iter361->write($output); } } $output->writeListEnd(); @@ -11639,14 +12621,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size330 = 0; - $_etype333 = 0; - $xfer += $input->readListBegin($_etype333, $_size330); - for ($_i334 = 0; $_i334 < $_size330; ++$_i334) + 
$_size362 = 0; + $_etype365 = 0; + $xfer += $input->readListBegin($_etype365, $_size362); + for ($_i366 = 0; $_i366 < $_size362; ++$_i366) { - $elem335 = null; - $xfer += $input->readString($elem335); - $this->part_vals []= $elem335; + $elem367 = null; + $xfer += $input->readString($elem367); + $this->part_vals []= $elem367; } $xfer += $input->readListEnd(); } else { @@ -11670,14 +12652,14 @@ case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size336 = 0; - $_etype339 = 0; - $xfer += $input->readListBegin($_etype339, $_size336); - for ($_i340 = 0; $_i340 < $_size336; ++$_i340) + $_size368 = 0; + $_etype371 = 0; + $xfer += $input->readListBegin($_etype371, $_size368); + for ($_i372 = 0; $_i372 < $_size368; ++$_i372) { - $elem341 = null; - $xfer += $input->readString($elem341); - $this->group_names []= $elem341; + $elem373 = null; + $xfer += $input->readString($elem373); + $this->group_names []= $elem373; } $xfer += $input->readListEnd(); } else { @@ -11715,9 +12697,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter342) + foreach ($this->part_vals as $iter374) { - $xfer += $output->writeString($iter342); + $xfer += $output->writeString($iter374); } } $output->writeListEnd(); @@ -11742,9 +12724,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter343) + foreach ($this->group_names as $iter375) { - $xfer += $output->writeString($iter343); + $xfer += $output->writeString($iter375); } } $output->writeListEnd(); @@ -11824,15 +12806,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size344 = 0; - $_etype347 = 0; - $xfer += $input->readListBegin($_etype347, $_size344); - for ($_i348 = 0; $_i348 < $_size344; ++$_i348) + $_size376 = 0; + $_etype379 = 0; + $xfer += $input->readListBegin($_etype379, $_size376); + for ($_i380 = 0; $_i380 < $_size376; ++$_i380) { - $elem349 = null; - $elem349 = new Partition(); - $xfer 
+= $elem349->read($input); - $this->success []= $elem349; + $elem381 = null; + $elem381 = new Partition(); + $xfer += $elem381->read($input); + $this->success []= $elem381; } $xfer += $input->readListEnd(); } else { @@ -11876,9 +12858,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter350) + foreach ($this->success as $iter382) { - $xfer += $iter350->write($output); + $xfer += $iter382->write($output); } } $output->writeListEnd(); @@ -11987,14 +12969,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size351 = 0; - $_etype354 = 0; - $xfer += $input->readListBegin($_etype354, $_size351); - for ($_i355 = 0; $_i355 < $_size351; ++$_i355) + $_size383 = 0; + $_etype386 = 0; + $xfer += $input->readListBegin($_etype386, $_size383); + for ($_i387 = 0; $_i387 < $_size383; ++$_i387) { - $elem356 = null; - $xfer += $input->readString($elem356); - $this->part_vals []= $elem356; + $elem388 = null; + $xfer += $input->readString($elem388); + $this->part_vals []= $elem388; } $xfer += $input->readListEnd(); } else { @@ -12039,9 +13021,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter357) + foreach ($this->part_vals as $iter389) { - $xfer += $output->writeString($iter357); + $xfer += $output->writeString($iter389); } } $output->writeListEnd(); @@ -12125,14 +13107,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size358 = 0; - $_etype361 = 0; - $xfer += $input->readListBegin($_etype361, $_size358); - for ($_i362 = 0; $_i362 < $_size358; ++$_i362) + $_size390 = 0; + $_etype393 = 0; + $xfer += $input->readListBegin($_etype393, $_size390); + for ($_i394 = 0; $_i394 < $_size390; ++$_i394) { - $elem363 = null; - $xfer += $input->readString($elem363); - $this->success []= $elem363; + $elem395 = null; + $xfer += $input->readString($elem395); + $this->success []= $elem395; } $xfer += $input->readListEnd(); } else { @@ -12176,9 
+13158,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter364) + foreach ($this->success as $iter396) { - $xfer += $output->writeString($iter364); + $xfer += $output->writeString($iter396); } } $output->writeListEnd(); @@ -12400,15 +13382,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size365 = 0; - $_etype368 = 0; - $xfer += $input->readListBegin($_etype368, $_size365); - for ($_i369 = 0; $_i369 < $_size365; ++$_i369) + $_size397 = 0; + $_etype400 = 0; + $xfer += $input->readListBegin($_etype400, $_size397); + for ($_i401 = 0; $_i401 < $_size397; ++$_i401) { - $elem370 = null; - $elem370 = new Partition(); - $xfer += $elem370->read($input); - $this->success []= $elem370; + $elem402 = null; + $elem402 = new Partition(); + $xfer += $elem402->read($input); + $this->success []= $elem402; } $xfer += $input->readListEnd(); } else { @@ -12452,9 +13434,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter371) + foreach ($this->success as $iter403) { - $xfer += $iter371->write($output); + $xfer += $iter403->write($output); } } $output->writeListEnd(); @@ -12555,14 +13537,14 @@ case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size372 = 0; - $_etype375 = 0; - $xfer += $input->readListBegin($_etype375, $_size372); - for ($_i376 = 0; $_i376 < $_size372; ++$_i376) + $_size404 = 0; + $_etype407 = 0; + $xfer += $input->readListBegin($_etype407, $_size404); + for ($_i408 = 0; $_i408 < $_size404; ++$_i408) { - $elem377 = null; - $xfer += $input->readString($elem377); - $this->names []= $elem377; + $elem409 = null; + $xfer += $input->readString($elem409); + $this->names []= $elem409; } $xfer += $input->readListEnd(); } else { @@ -12600,9 +13582,9 @@ { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter378) + foreach ($this->names as $iter410) { - $xfer += $output->writeString($iter378); + $xfer 
+= $output->writeString($iter410); } } $output->writeListEnd(); @@ -12682,15 +13664,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size379 = 0; - $_etype382 = 0; - $xfer += $input->readListBegin($_etype382, $_size379); - for ($_i383 = 0; $_i383 < $_size379; ++$_i383) + $_size411 = 0; + $_etype414 = 0; + $xfer += $input->readListBegin($_etype414, $_size411); + for ($_i415 = 0; $_i415 < $_size411; ++$_i415) { - $elem384 = null; - $elem384 = new Partition(); - $xfer += $elem384->read($input); - $this->success []= $elem384; + $elem416 = null; + $elem416 = new Partition(); + $xfer += $elem416->read($input); + $this->success []= $elem416; } $xfer += $input->readListEnd(); } else { @@ -12734,9 +13716,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter385) + foreach ($this->success as $iter417) { - $xfer += $iter385->write($output); + $xfer += $iter417->write($output); } } $output->writeListEnd(); @@ -13059,14 +14041,14 @@ case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size386 = 0; - $_etype389 = 0; - $xfer += $input->readListBegin($_etype389, $_size386); - for ($_i390 = 0; $_i390 < $_size386; ++$_i390) + $_size418 = 0; + $_etype421 = 0; + $xfer += $input->readListBegin($_etype421, $_size418); + for ($_i422 = 0; $_i422 < $_size418; ++$_i422) { - $elem391 = null; - $xfer += $input->readString($elem391); - $this->part_vals []= $elem391; + $elem423 = null; + $xfer += $input->readString($elem423); + $this->part_vals []= $elem423; } $xfer += $input->readListEnd(); } else { @@ -13112,9 +14094,9 @@ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter392) + foreach ($this->part_vals as $iter424) { - $xfer += $output->writeString($iter392); + $xfer += $output->writeString($iter424); } } $output->writeListEnd(); @@ -13546,14 +14528,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size393 = 0; - $_etype396 = 
0; - $xfer += $input->readListBegin($_etype396, $_size393); - for ($_i397 = 0; $_i397 < $_size393; ++$_i397) + $_size425 = 0; + $_etype428 = 0; + $xfer += $input->readListBegin($_etype428, $_size425); + for ($_i429 = 0; $_i429 < $_size425; ++$_i429) { - $elem398 = null; - $xfer += $input->readString($elem398); - $this->success []= $elem398; + $elem430 = null; + $xfer += $input->readString($elem430); + $this->success []= $elem430; } $xfer += $input->readListEnd(); } else { @@ -13589,9 +14571,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter399) + foreach ($this->success as $iter431) { - $xfer += $output->writeString($iter399); + $xfer += $output->writeString($iter431); } } $output->writeListEnd(); @@ -13742,17 +14724,17 @@ case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size400 = 0; - $_ktype401 = 0; - $_vtype402 = 0; - $xfer += $input->readMapBegin($_ktype401, $_vtype402, $_size400); - for ($_i404 = 0; $_i404 < $_size400; ++$_i404) + $_size432 = 0; + $_ktype433 = 0; + $_vtype434 = 0; + $xfer += $input->readMapBegin($_ktype433, $_vtype434, $_size432); + for ($_i436 = 0; $_i436 < $_size432; ++$_i436) { - $key405 = ''; - $val406 = ''; - $xfer += $input->readString($key405); - $xfer += $input->readString($val406); - $this->success[$key405] = $val406; + $key437 = ''; + $val438 = ''; + $xfer += $input->readString($key437); + $xfer += $input->readString($val438); + $this->success[$key437] = $val438; } $xfer += $input->readMapEnd(); } else { @@ -13788,10 +14770,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter407 => $viter408) + foreach ($this->success as $kiter439 => $viter440) { - $xfer += $output->writeString($kiter407); - $xfer += $output->writeString($viter408); + $xfer += $output->writeString($kiter439); + $xfer += $output->writeString($viter440); } } $output->writeMapEnd(); @@ -13899,17 +14881,17 @@ case 3: if ($ftype == 
TType::MAP) { $this->part_vals = array(); - $_size409 = 0; - $_ktype410 = 0; - $_vtype411 = 0; - $xfer += $input->readMapBegin($_ktype410, $_vtype411, $_size409); - for ($_i413 = 0; $_i413 < $_size409; ++$_i413) + $_size441 = 0; + $_ktype442 = 0; + $_vtype443 = 0; + $xfer += $input->readMapBegin($_ktype442, $_vtype443, $_size441); + for ($_i445 = 0; $_i445 < $_size441; ++$_i445) { - $key414 = ''; - $val415 = ''; - $xfer += $input->readString($key414); - $xfer += $input->readString($val415); - $this->part_vals[$key414] = $val415; + $key446 = ''; + $val447 = ''; + $xfer += $input->readString($key446); + $xfer += $input->readString($val447); + $this->part_vals[$key446] = $val447; } $xfer += $input->readMapEnd(); } else { @@ -13954,10 +14936,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter416 => $viter417) + foreach ($this->part_vals as $kiter448 => $viter449) { - $xfer += $output->writeString($kiter416); - $xfer += $output->writeString($viter417); + $xfer += $output->writeString($kiter448); + $xfer += $output->writeString($viter449); } } $output->writeMapEnd(); @@ -14249,17 +15231,17 @@ case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size418 = 0; - $_ktype419 = 0; - $_vtype420 = 0; - $xfer += $input->readMapBegin($_ktype419, $_vtype420, $_size418); - for ($_i422 = 0; $_i422 < $_size418; ++$_i422) + $_size450 = 0; + $_ktype451 = 0; + $_vtype452 = 0; + $xfer += $input->readMapBegin($_ktype451, $_vtype452, $_size450); + for ($_i454 = 0; $_i454 < $_size450; ++$_i454) { - $key423 = ''; - $val424 = ''; - $xfer += $input->readString($key423); - $xfer += $input->readString($val424); - $this->part_vals[$key423] = $val424; + $key455 = ''; + $val456 = ''; + $xfer += $input->readString($key455); + $xfer += $input->readString($val456); + $this->part_vals[$key455] = $val456; } $xfer += $input->readMapEnd(); } else { @@ -14304,10 +15286,10 @@ { $output->writeMapBegin(TType::STRING, 
TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter425 => $viter426) + foreach ($this->part_vals as $kiter457 => $viter458) { - $xfer += $output->writeString($kiter425); - $xfer += $output->writeString($viter426); + $xfer += $output->writeString($kiter457); + $xfer += $output->writeString($viter458); } } $output->writeMapEnd(); @@ -15667,15 +16649,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size427 = 0; - $_etype430 = 0; - $xfer += $input->readListBegin($_etype430, $_size427); - for ($_i431 = 0; $_i431 < $_size427; ++$_i431) + $_size459 = 0; + $_etype462 = 0; + $xfer += $input->readListBegin($_etype462, $_size459); + for ($_i463 = 0; $_i463 < $_size459; ++$_i463) { - $elem432 = null; - $elem432 = new Index(); - $xfer += $elem432->read($input); - $this->success []= $elem432; + $elem464 = null; + $elem464 = new Index(); + $xfer += $elem464->read($input); + $this->success []= $elem464; } $xfer += $input->readListEnd(); } else { @@ -15719,9 +16701,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter433) + foreach ($this->success as $iter465) { - $xfer += $iter433->write($output); + $xfer += $iter465->write($output); } } $output->writeListEnd(); @@ -15913,14 +16895,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size434 = 0; - $_etype437 = 0; - $xfer += $input->readListBegin($_etype437, $_size434); - for ($_i438 = 0; $_i438 < $_size434; ++$_i438) + $_size466 = 0; + $_etype469 = 0; + $xfer += $input->readListBegin($_etype469, $_size466); + for ($_i470 = 0; $_i470 < $_size466; ++$_i470) { - $elem439 = null; - $xfer += $input->readString($elem439); - $this->success []= $elem439; + $elem471 = null; + $xfer += $input->readString($elem471); + $this->success []= $elem471; } $xfer += $input->readListEnd(); } else { @@ -15956,9 +16938,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter440) + 
foreach ($this->success as $iter472) { - $xfer += $output->writeString($iter440); + $xfer += $output->writeString($iter472); } } $output->writeListEnd(); @@ -15977,6 +16959,993 @@ } +class ThriftHiveMetastore_create_cluster_args { + static $_TSPEC; + + public $cluster = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster', + 'type' => TType::STRUCT, + 'class' => 'Cluster', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster'])) { + $this->cluster = $vals['cluster']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->cluster = new Cluster(); + $xfer += $this->cluster->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_cluster_args'); + if ($this->cluster !== null) { + if (!is_object($this->cluster)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('cluster', TType::STRUCT, 1); + $xfer += $this->cluster->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_cluster_result { + static $_TSPEC; + + public $o1 = null; + public $o2 = null; + public $o3 = null; + + public function 
__construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'AlreadyExistsException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new AlreadyExistsException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_cluster_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += 
$output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_cluster_args { + static $_TSPEC; + + public $cluster_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster_name'])) { + $this->cluster_name = $vals['cluster_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cluster_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_cluster_args'); + if ($this->cluster_name !== null) { + $xfer += $output->writeFieldBegin('cluster_name', TType::STRING, 1); + $xfer += $output->writeString($this->cluster_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_cluster_result { + static $_TSPEC; + + public $o1 = null; + 
public $o2 = null; + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_cluster_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += 
$output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_cluster_args { + static $_TSPEC; + + public $cluster_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster_name'])) { + $this->cluster_name = $vals['cluster_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cluster_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_cluster_args'); + if ($this->cluster_name !== null) { + $xfer += $output->writeFieldBegin('cluster_name', TType::STRING, 1); + $xfer += $output->writeString($this->cluster_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class 
ThriftHiveMetastore_get_cluster_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => 'Cluster', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'NoSuchObjectException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new Cluster(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new NoSuchObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_cluster_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw 
new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_list_clusters_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_list_clusters_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_list_clusters_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_list_clusters_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => 'Cluster', + ), + ), + 1 => array( 
+ 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_list_clusters_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size473 = 0; + $_etype476 = 0; + $xfer += $input->readListBegin($_etype476, $_size473); + for ($_i477 = 0; $_i477 < $_size473; ++$_i477) + { + $elem478 = null; + $elem478 = new Cluster(); + $xfer += $elem478->read($input); + $this->success []= $elem478; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_list_clusters_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRUCT, count($this->success)); + { + foreach ($this->success as $iter479) + { + $xfer += $iter479->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += 
$output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_cluster_args { + static $_TSPEC; + + public $cluster_name = null; + public $cluster = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'cluster', + 'type' => TType::STRUCT, + 'class' => 'Cluster', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster_name'])) { + $this->cluster_name = $vals['cluster_name']; + } + if (isset($vals['cluster'])) { + $this->cluster = $vals['cluster']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_cluster_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->cluster_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->cluster = new Cluster(); + $xfer += $this->cluster->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_cluster_args'); + if ($this->cluster_name !== null) { + $xfer += $output->writeFieldBegin('cluster_name', TType::STRING, 1); + $xfer += $output->writeString($this->cluster_name); + $xfer += 
$output->writeFieldEnd(); + } + if ($this->cluster !== null) { + if (!is_object($this->cluster)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('cluster', TType::STRUCT, 2); + $xfer += $this->cluster->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_cluster_result { + static $_TSPEC; + + public $o1 = null; + public $o2 = null; + public $o3 = null; + public $o4 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => 'MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => 'InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => 'NoSuchObjectException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, + 'class' => 'AlreadyExistsException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_cluster_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new 
InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new NoSuchObjectException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new AlreadyExistsException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_cluster_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_create_role_args { static $_TSPEC; @@ -16420,14 +18389,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size441 = 0; - $_etype444 = 0; - $xfer += $input->readListBegin($_etype444, $_size441); - for ($_i445 = 0; $_i445 < $_size441; ++$_i445) + $_size480 = 0; + $_etype483 = 0; + $xfer += $input->readListBegin($_etype483, $_size480); + for ($_i484 = 0; $_i484 < $_size480; ++$_i484) { - $elem446 = null; - $xfer += 
$input->readString($elem446); - $this->success []= $elem446; + $elem485 = null; + $xfer += $input->readString($elem485); + $this->success []= $elem485; } $xfer += $input->readListEnd(); } else { @@ -16463,9 +18432,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter447) + foreach ($this->success as $iter486) { - $xfer += $output->writeString($iter447); + $xfer += $output->writeString($iter486); } } $output->writeListEnd(); @@ -17105,15 +19074,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size448 = 0; - $_etype451 = 0; - $xfer += $input->readListBegin($_etype451, $_size448); - for ($_i452 = 0; $_i452 < $_size448; ++$_i452) + $_size487 = 0; + $_etype490 = 0; + $xfer += $input->readListBegin($_etype490, $_size487); + for ($_i491 = 0; $_i491 < $_size487; ++$_i491) { - $elem453 = null; - $elem453 = new Role(); - $xfer += $elem453->read($input); - $this->success []= $elem453; + $elem492 = null; + $elem492 = new Role(); + $xfer += $elem492->read($input); + $this->success []= $elem492; } $xfer += $input->readListEnd(); } else { @@ -17149,9 +19118,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter454) + foreach ($this->success as $iter493) { - $xfer += $iter454->write($output); + $xfer += $iter493->write($output); } } $output->writeListEnd(); @@ -17249,14 +19218,14 @@ case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size455 = 0; - $_etype458 = 0; - $xfer += $input->readListBegin($_etype458, $_size455); - for ($_i459 = 0; $_i459 < $_size455; ++$_i459) + $_size494 = 0; + $_etype497 = 0; + $xfer += $input->readListBegin($_etype497, $_size494); + for ($_i498 = 0; $_i498 < $_size494; ++$_i498) { - $elem460 = null; - $xfer += $input->readString($elem460); - $this->group_names []= $elem460; + $elem499 = null; + $xfer += $input->readString($elem499); + $this->group_names []= $elem499; } $xfer += $input->readListEnd(); } else { 
@@ -17297,9 +19266,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter461) + foreach ($this->group_names as $iter500) { - $xfer += $output->writeString($iter461); + $xfer += $output->writeString($iter500); } } $output->writeListEnd(); @@ -17586,15 +19555,15 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size462 = 0; - $_etype465 = 0; - $xfer += $input->readListBegin($_etype465, $_size462); - for ($_i466 = 0; $_i466 < $_size462; ++$_i466) + $_size501 = 0; + $_etype504 = 0; + $xfer += $input->readListBegin($_etype504, $_size501); + for ($_i505 = 0; $_i505 < $_size501; ++$_i505) { - $elem467 = null; - $elem467 = new HiveObjectPrivilege(); - $xfer += $elem467->read($input); - $this->success []= $elem467; + $elem506 = null; + $elem506 = new HiveObjectPrivilege(); + $xfer += $elem506->read($input); + $this->success []= $elem506; } $xfer += $input->readListEnd(); } else { @@ -17630,9 +19599,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter468) + foreach ($this->success as $iter507) { - $xfer += $iter468->write($output); + $xfer += $iter507->write($output); } } $output->writeListEnd(); @@ -18055,14 +20024,14 @@ case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size469 = 0; - $_etype472 = 0; - $xfer += $input->readListBegin($_etype472, $_size469); - for ($_i473 = 0; $_i473 < $_size469; ++$_i473) + $_size508 = 0; + $_etype511 = 0; + $xfer += $input->readListBegin($_etype511, $_size508); + for ($_i512 = 0; $_i512 < $_size508; ++$_i512) { - $elem474 = null; - $xfer += $input->readString($elem474); - $this->group_names []= $elem474; + $elem513 = null; + $xfer += $input->readString($elem513); + $this->group_names []= $elem513; } $xfer += $input->readListEnd(); } else { @@ -18095,9 +20064,9 @@ { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter475) + foreach 
($this->group_names as $iter514) { - $xfer += $output->writeString($iter475); + $xfer += $output->writeString($iter514); } } $output->writeListEnd(); @@ -18167,14 +20136,14 @@ case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size476 = 0; - $_etype479 = 0; - $xfer += $input->readListBegin($_etype479, $_size476); - for ($_i480 = 0; $_i480 < $_size476; ++$_i480) + $_size515 = 0; + $_etype518 = 0; + $xfer += $input->readListBegin($_etype518, $_size515); + for ($_i519 = 0; $_i519 < $_size515; ++$_i519) { - $elem481 = null; - $xfer += $input->readString($elem481); - $this->success []= $elem481; + $elem520 = null; + $xfer += $input->readString($elem520); + $this->success []= $elem520; } $xfer += $input->readListEnd(); } else { @@ -18210,9 +20179,9 @@ { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter482) + foreach ($this->success as $iter521) { - $xfer += $output->writeString($iter482); + $xfer += $output->writeString($iter521); } } $output->writeListEnd(); Index: metastore/src/gen/thrift/gen-php/hive_metastore/hive_metastore_types.php =================================================================== --- metastore/src/gen/thrift/gen-php/hive_metastore/hive_metastore_types.php (revision 1235046) +++ metastore/src/gen/thrift/gen-php/hive_metastore/hive_metastore_types.php (working copy) @@ -1396,6 +1396,383 @@ } +class Cluster { + static $_TSPEC; + + public $name = null; + public $comment = null; + public $createTime = null; + public $locationUri = null; + public $parameters = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'comment', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'createTime', + 'type' => TType::I32, + ), + 4 => array( + 'var' => 'locationUri', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'parameters', + 'type' => 
TType::MAP, + 'ktype' => TType::STRING, + 'vtype' => TType::STRING, + 'key' => array( + 'type' => TType::STRING, + ), + 'val' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['comment'])) { + $this->comment = $vals['comment']; + } + if (isset($vals['createTime'])) { + $this->createTime = $vals['createTime']; + } + if (isset($vals['locationUri'])) { + $this->locationUri = $vals['locationUri']; + } + if (isset($vals['parameters'])) { + $this->parameters = $vals['parameters']; + } + } + } + + public function getName() { + return 'Cluster'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->comment); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->createTime); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->locationUri); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::MAP) { + $this->parameters = array(); + $_size69 = 0; + $_ktype70 = 0; + $_vtype71 = 0; + $xfer += $input->readMapBegin($_ktype70, $_vtype71, $_size69); + for ($_i73 = 0; $_i73 < $_size69; ++$_i73) + { + $key74 = ''; + $val75 = ''; + $xfer += $input->readString($key74); + $xfer += $input->readString($val75); + $this->parameters[$key74] = $val75; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; 
+ default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('Cluster'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->comment !== null) { + $xfer += $output->writeFieldBegin('comment', TType::STRING, 2); + $xfer += $output->writeString($this->comment); + $xfer += $output->writeFieldEnd(); + } + if ($this->createTime !== null) { + $xfer += $output->writeFieldBegin('createTime', TType::I32, 3); + $xfer += $output->writeI32($this->createTime); + $xfer += $output->writeFieldEnd(); + } + if ($this->locationUri !== null) { + $xfer += $output->writeFieldBegin('locationUri', TType::STRING, 4); + $xfer += $output->writeString($this->locationUri); + $xfer += $output->writeFieldEnd(); + } + if ($this->parameters !== null) { + if (!is_array($this->parameters)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('parameters', TType::MAP, 5); + { + $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); + { + foreach ($this->parameters as $kiter76 => $viter77) + { + $xfer += $output->writeString($kiter76); + $xfer += $output->writeString($viter77); + } + } + $output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ClusterStorageDescriptor { + static $_TSPEC; + + public $cluster = null; + public $location = null; + public $primary = null; + public $dataSynced = null; + public $parameters = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'cluster', + 
'type' => TType::STRUCT, + 'class' => 'Cluster', + ), + 2 => array( + 'var' => 'location', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'primary', + 'type' => TType::BOOL, + ), + 4 => array( + 'var' => 'dataSynced', + 'type' => TType::BOOL, + ), + 5 => array( + 'var' => 'parameters', + 'type' => TType::MAP, + 'ktype' => TType::STRING, + 'vtype' => TType::STRING, + 'key' => array( + 'type' => TType::STRING, + ), + 'val' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['cluster'])) { + $this->cluster = $vals['cluster']; + } + if (isset($vals['location'])) { + $this->location = $vals['location']; + } + if (isset($vals['primary'])) { + $this->primary = $vals['primary']; + } + if (isset($vals['dataSynced'])) { + $this->dataSynced = $vals['dataSynced']; + } + if (isset($vals['parameters'])) { + $this->parameters = $vals['parameters']; + } + } + } + + public function getName() { + return 'ClusterStorageDescriptor'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->cluster = new Cluster(); + $xfer += $this->cluster->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->location); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->primary); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->dataSynced); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::MAP) { + $this->parameters = array(); + $_size78 = 0; + $_ktype79 = 0; + $_vtype80 = 0; + 
$xfer += $input->readMapBegin($_ktype79, $_vtype80, $_size78); + for ($_i82 = 0; $_i82 < $_size78; ++$_i82) + { + $key83 = ''; + $val84 = ''; + $xfer += $input->readString($key83); + $xfer += $input->readString($val84); + $this->parameters[$key83] = $val84; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ClusterStorageDescriptor'); + if ($this->cluster !== null) { + if (!is_object($this->cluster)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('cluster', TType::STRUCT, 1); + $xfer += $this->cluster->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->location !== null) { + $xfer += $output->writeFieldBegin('location', TType::STRING, 2); + $xfer += $output->writeString($this->location); + $xfer += $output->writeFieldEnd(); + } + if ($this->primary !== null) { + $xfer += $output->writeFieldBegin('primary', TType::BOOL, 3); + $xfer += $output->writeBool($this->primary); + $xfer += $output->writeFieldEnd(); + } + if ($this->dataSynced !== null) { + $xfer += $output->writeFieldBegin('dataSynced', TType::BOOL, 4); + $xfer += $output->writeBool($this->dataSynced); + $xfer += $output->writeFieldEnd(); + } + if ($this->parameters !== null) { + if (!is_array($this->parameters)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('parameters', TType::MAP, 5); + { + $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); + { + foreach ($this->parameters as $kiter85 => $viter86) + { + $xfer += $output->writeString($kiter85); + $xfer += $output->writeString($viter86); + } + } + 
$output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class Database { static $_TSPEC; @@ -1501,17 +1878,17 @@ case 4: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size69 = 0; - $_ktype70 = 0; - $_vtype71 = 0; - $xfer += $input->readMapBegin($_ktype70, $_vtype71, $_size69); - for ($_i73 = 0; $_i73 < $_size69; ++$_i73) + $_size87 = 0; + $_ktype88 = 0; + $_vtype89 = 0; + $xfer += $input->readMapBegin($_ktype88, $_vtype89, $_size87); + for ($_i91 = 0; $_i91 < $_size87; ++$_i91) { - $key74 = ''; - $val75 = ''; - $xfer += $input->readString($key74); - $xfer += $input->readString($val75); - $this->parameters[$key74] = $val75; + $key92 = ''; + $val93 = ''; + $xfer += $input->readString($key92); + $xfer += $input->readString($val93); + $this->parameters[$key92] = $val93; } $xfer += $input->readMapEnd(); } else { @@ -1562,10 +1939,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter76 => $viter77) + foreach ($this->parameters as $kiter94 => $viter95) { - $xfer += $output->writeString($kiter76); - $xfer += $output->writeString($viter77); + $xfer += $output->writeString($kiter94); + $xfer += $output->writeString($viter95); } } $output->writeMapEnd(); @@ -1668,17 +2045,17 @@ case 3: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size78 = 0; - $_ktype79 = 0; - $_vtype80 = 0; - $xfer += $input->readMapBegin($_ktype79, $_vtype80, $_size78); - for ($_i82 = 0; $_i82 < $_size78; ++$_i82) + $_size96 = 0; + $_ktype97 = 0; + $_vtype98 = 0; + $xfer += $input->readMapBegin($_ktype97, $_vtype98, $_size96); + for ($_i100 = 0; $_i100 < $_size96; ++$_i100) { - $key83 = ''; - $val84 = ''; - $xfer += $input->readString($key83); - $xfer += $input->readString($val84); - $this->parameters[$key83] = $val84; + $key101 = ''; + $val102 = ''; + $xfer += $input->readString($key101); 
+ $xfer += $input->readString($val102); + $this->parameters[$key101] = $val102; } $xfer += $input->readMapEnd(); } else { @@ -1716,10 +2093,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter85 => $viter86) + foreach ($this->parameters as $kiter103 => $viter104) { - $xfer += $output->writeString($kiter85); - $xfer += $output->writeString($viter86); + $xfer += $output->writeString($kiter103); + $xfer += $output->writeString($viter104); } } $output->writeMapEnd(); @@ -1838,6 +2215,7 @@ public $bucketCols = null; public $sortCols = null; public $parameters = null; + public $clusterStorage = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -1905,6 +2283,15 @@ 'type' => TType::STRING, ), ), + 11 => array( + 'var' => 'clusterStorage', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => 'ClusterStorageDescriptor', + ), + ), ); } if (is_array($vals)) { @@ -1938,6 +2325,9 @@ if (isset($vals['parameters'])) { $this->parameters = $vals['parameters']; } + if (isset($vals['clusterStorage'])) { + $this->clusterStorage = $vals['clusterStorage']; + } } } @@ -1963,15 +2353,15 @@ case 1: if ($ftype == TType::LST) { $this->cols = array(); - $_size87 = 0; - $_etype90 = 0; - $xfer += $input->readListBegin($_etype90, $_size87); - for ($_i91 = 0; $_i91 < $_size87; ++$_i91) + $_size105 = 0; + $_etype108 = 0; + $xfer += $input->readListBegin($_etype108, $_size105); + for ($_i109 = 0; $_i109 < $_size105; ++$_i109) { - $elem92 = null; - $elem92 = new FieldSchema(); - $xfer += $elem92->read($input); - $this->cols []= $elem92; + $elem110 = null; + $elem110 = new FieldSchema(); + $xfer += $elem110->read($input); + $this->cols []= $elem110; } $xfer += $input->readListEnd(); } else { @@ -2024,14 +2414,14 @@ case 8: if ($ftype == TType::LST) { $this->bucketCols = array(); - $_size93 = 0; - $_etype96 = 0; - $xfer += 
$input->readListBegin($_etype96, $_size93); - for ($_i97 = 0; $_i97 < $_size93; ++$_i97) + $_size111 = 0; + $_etype114 = 0; + $xfer += $input->readListBegin($_etype114, $_size111); + for ($_i115 = 0; $_i115 < $_size111; ++$_i115) { - $elem98 = null; - $xfer += $input->readString($elem98); - $this->bucketCols []= $elem98; + $elem116 = null; + $xfer += $input->readString($elem116); + $this->bucketCols []= $elem116; } $xfer += $input->readListEnd(); } else { @@ -2041,15 +2431,15 @@ case 9: if ($ftype == TType::LST) { $this->sortCols = array(); - $_size99 = 0; - $_etype102 = 0; - $xfer += $input->readListBegin($_etype102, $_size99); - for ($_i103 = 0; $_i103 < $_size99; ++$_i103) + $_size117 = 0; + $_etype120 = 0; + $xfer += $input->readListBegin($_etype120, $_size117); + for ($_i121 = 0; $_i121 < $_size117; ++$_i121) { - $elem104 = null; - $elem104 = new Order(); - $xfer += $elem104->read($input); - $this->sortCols []= $elem104; + $elem122 = null; + $elem122 = new Order(); + $xfer += $elem122->read($input); + $this->sortCols []= $elem122; } $xfer += $input->readListEnd(); } else { @@ -2059,23 +2449,41 @@ case 10: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size105 = 0; - $_ktype106 = 0; - $_vtype107 = 0; - $xfer += $input->readMapBegin($_ktype106, $_vtype107, $_size105); - for ($_i109 = 0; $_i109 < $_size105; ++$_i109) + $_size123 = 0; + $_ktype124 = 0; + $_vtype125 = 0; + $xfer += $input->readMapBegin($_ktype124, $_vtype125, $_size123); + for ($_i127 = 0; $_i127 < $_size123; ++$_i127) { - $key110 = ''; - $val111 = ''; - $xfer += $input->readString($key110); - $xfer += $input->readString($val111); - $this->parameters[$key110] = $val111; + $key128 = ''; + $val129 = ''; + $xfer += $input->readString($key128); + $xfer += $input->readString($val129); + $this->parameters[$key128] = $val129; } $xfer += $input->readMapEnd(); } else { $xfer += $input->skip($ftype); } break; + case 11: + if ($ftype == TType::LST) { + $this->clusterStorage = array(); + 
$_size130 = 0; + $_etype133 = 0; + $xfer += $input->readListBegin($_etype133, $_size130); + for ($_i134 = 0; $_i134 < $_size130; ++$_i134) + { + $elem135 = null; + $elem135 = new ClusterStorageDescriptor(); + $xfer += $elem135->read($input); + $this->clusterStorage []= $elem135; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2097,9 +2505,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter112) + foreach ($this->cols as $iter136) { - $xfer += $iter112->write($output); + $xfer += $iter136->write($output); } } $output->writeListEnd(); @@ -2147,9 +2555,9 @@ { $output->writeListBegin(TType::STRING, count($this->bucketCols)); { - foreach ($this->bucketCols as $iter113) + foreach ($this->bucketCols as $iter137) { - $xfer += $output->writeString($iter113); + $xfer += $output->writeString($iter137); } } $output->writeListEnd(); @@ -2164,9 +2572,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->sortCols)); { - foreach ($this->sortCols as $iter114) + foreach ($this->sortCols as $iter138) { - $xfer += $iter114->write($output); + $xfer += $iter138->write($output); } } $output->writeListEnd(); @@ -2181,16 +2589,33 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter115 => $viter116) + foreach ($this->parameters as $kiter139 => $viter140) { - $xfer += $output->writeString($kiter115); - $xfer += $output->writeString($viter116); + $xfer += $output->writeString($kiter139); + $xfer += $output->writeString($viter140); } } $output->writeMapEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->clusterStorage !== null) { + if (!is_array($this->clusterStorage)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('clusterStorage', TType::LST, 11); + { + 
$output->writeListBegin(TType::STRUCT, count($this->clusterStorage)); + { + foreach ($this->clusterStorage as $iter141) + { + $xfer += $iter141->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -2402,15 +2827,15 @@ case 8: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size117 = 0; - $_etype120 = 0; - $xfer += $input->readListBegin($_etype120, $_size117); - for ($_i121 = 0; $_i121 < $_size117; ++$_i121) + $_size142 = 0; + $_etype145 = 0; + $xfer += $input->readListBegin($_etype145, $_size142); + for ($_i146 = 0; $_i146 < $_size142; ++$_i146) { - $elem122 = null; - $elem122 = new FieldSchema(); - $xfer += $elem122->read($input); - $this->partitionKeys []= $elem122; + $elem147 = null; + $elem147 = new FieldSchema(); + $xfer += $elem147->read($input); + $this->partitionKeys []= $elem147; } $xfer += $input->readListEnd(); } else { @@ -2420,17 +2845,17 @@ case 9: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size123 = 0; - $_ktype124 = 0; - $_vtype125 = 0; - $xfer += $input->readMapBegin($_ktype124, $_vtype125, $_size123); - for ($_i127 = 0; $_i127 < $_size123; ++$_i127) + $_size148 = 0; + $_ktype149 = 0; + $_vtype150 = 0; + $xfer += $input->readMapBegin($_ktype149, $_vtype150, $_size148); + for ($_i152 = 0; $_i152 < $_size148; ++$_i152) { - $key128 = ''; - $val129 = ''; - $xfer += $input->readString($key128); - $xfer += $input->readString($val129); - $this->parameters[$key128] = $val129; + $key153 = ''; + $val154 = ''; + $xfer += $input->readString($key153); + $xfer += $input->readString($val154); + $this->parameters[$key153] = $val154; } $xfer += $input->readMapEnd(); } else { @@ -2525,9 +2950,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter130) + foreach ($this->partitionKeys as $iter155) { - $xfer += $iter130->write($output); + 
$xfer += $iter155->write($output); } } $output->writeListEnd(); @@ -2542,10 +2967,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter131 => $viter132) + foreach ($this->parameters as $kiter156 => $viter157) { - $xfer += $output->writeString($kiter131); - $xfer += $output->writeString($viter132); + $xfer += $output->writeString($kiter156); + $xfer += $output->writeString($viter157); } } $output->writeMapEnd(); @@ -2695,14 +3120,14 @@ case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size133 = 0; - $_etype136 = 0; - $xfer += $input->readListBegin($_etype136, $_size133); - for ($_i137 = 0; $_i137 < $_size133; ++$_i137) + $_size158 = 0; + $_etype161 = 0; + $xfer += $input->readListBegin($_etype161, $_size158); + for ($_i162 = 0; $_i162 < $_size158; ++$_i162) { - $elem138 = null; - $xfer += $input->readString($elem138); - $this->values []= $elem138; + $elem163 = null; + $xfer += $input->readString($elem163); + $this->values []= $elem163; } $xfer += $input->readListEnd(); } else { @@ -2748,17 +3173,17 @@ case 7: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size139 = 0; - $_ktype140 = 0; - $_vtype141 = 0; - $xfer += $input->readMapBegin($_ktype140, $_vtype141, $_size139); - for ($_i143 = 0; $_i143 < $_size139; ++$_i143) + $_size164 = 0; + $_ktype165 = 0; + $_vtype166 = 0; + $xfer += $input->readMapBegin($_ktype165, $_vtype166, $_size164); + for ($_i168 = 0; $_i168 < $_size164; ++$_i168) { - $key144 = ''; - $val145 = ''; - $xfer += $input->readString($key144); - $xfer += $input->readString($val145); - $this->parameters[$key144] = $val145; + $key169 = ''; + $val170 = ''; + $xfer += $input->readString($key169); + $xfer += $input->readString($val170); + $this->parameters[$key169] = $val170; } $xfer += $input->readMapEnd(); } else { @@ -2794,9 +3219,9 @@ { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter146) + foreach 
($this->values as $iter171) { - $xfer += $output->writeString($iter146); + $xfer += $output->writeString($iter171); } } $output->writeListEnd(); @@ -2839,10 +3264,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter147 => $viter148) + foreach ($this->parameters as $kiter172 => $viter173) { - $xfer += $output->writeString($kiter147); - $xfer += $output->writeString($viter148); + $xfer += $output->writeString($kiter172); + $xfer += $output->writeString($viter173); } } $output->writeMapEnd(); @@ -3045,17 +3470,17 @@ case 9: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size149 = 0; - $_ktype150 = 0; - $_vtype151 = 0; - $xfer += $input->readMapBegin($_ktype150, $_vtype151, $_size149); - for ($_i153 = 0; $_i153 < $_size149; ++$_i153) + $_size174 = 0; + $_ktype175 = 0; + $_vtype176 = 0; + $xfer += $input->readMapBegin($_ktype175, $_vtype176, $_size174); + for ($_i178 = 0; $_i178 < $_size174; ++$_i178) { - $key154 = ''; - $val155 = ''; - $xfer += $input->readString($key154); - $xfer += $input->readString($val155); - $this->parameters[$key154] = $val155; + $key179 = ''; + $val180 = ''; + $xfer += $input->readString($key179); + $xfer += $input->readString($val180); + $this->parameters[$key179] = $val180; } $xfer += $input->readMapEnd(); } else { @@ -3133,10 +3558,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter156 => $viter157) + foreach ($this->parameters as $kiter181 => $viter182) { - $xfer += $output->writeString($kiter156); - $xfer += $output->writeString($viter157); + $xfer += $output->writeString($kiter181); + $xfer += $output->writeString($viter182); } } $output->writeMapEnd(); @@ -3219,15 +3644,15 @@ case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size158 = 0; - $_etype161 = 0; - $xfer += $input->readListBegin($_etype161, $_size158); - for ($_i162 = 0; $_i162 < $_size158; 
++$_i162) + $_size183 = 0; + $_etype186 = 0; + $xfer += $input->readListBegin($_etype186, $_size183); + for ($_i187 = 0; $_i187 < $_size183; ++$_i187) { - $elem163 = null; - $elem163 = new FieldSchema(); - $xfer += $elem163->read($input); - $this->fieldSchemas []= $elem163; + $elem188 = null; + $elem188 = new FieldSchema(); + $xfer += $elem188->read($input); + $this->fieldSchemas []= $elem188; } $xfer += $input->readListEnd(); } else { @@ -3237,17 +3662,17 @@ case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size164 = 0; - $_ktype165 = 0; - $_vtype166 = 0; - $xfer += $input->readMapBegin($_ktype165, $_vtype166, $_size164); - for ($_i168 = 0; $_i168 < $_size164; ++$_i168) + $_size189 = 0; + $_ktype190 = 0; + $_vtype191 = 0; + $xfer += $input->readMapBegin($_ktype190, $_vtype191, $_size189); + for ($_i193 = 0; $_i193 < $_size189; ++$_i193) { - $key169 = ''; - $val170 = ''; - $xfer += $input->readString($key169); - $xfer += $input->readString($val170); - $this->properties[$key169] = $val170; + $key194 = ''; + $val195 = ''; + $xfer += $input->readString($key194); + $xfer += $input->readString($val195); + $this->properties[$key194] = $val195; } $xfer += $input->readMapEnd(); } else { @@ -3275,9 +3700,9 @@ { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter171) + foreach ($this->fieldSchemas as $iter196) { - $xfer += $iter171->write($output); + $xfer += $iter196->write($output); } } $output->writeListEnd(); @@ -3292,10 +3717,10 @@ { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter172 => $viter173) + foreach ($this->properties as $kiter197 => $viter198) { - $xfer += $output->writeString($kiter172); - $xfer += $output->writeString($viter173); + $xfer += $output->writeString($kiter197); + $xfer += $output->writeString($viter198); } } $output->writeMapEnd(); Index: metastore/if/hive_metastore.thrift 
=================================================================== --- metastore/if/hive_metastore.thrift (revision 1235046) +++ metastore/if/hive_metastore.thrift (working copy) @@ -90,6 +90,22 @@ 3: string ownerName, } +struct Cluster { + 1: string name, + 2: string comment, + 3: i32 createTime,// creation time of the table + 4: string locationUri, + 5: map parameters, +} + +struct ClusterStorageDescriptor { + 1: Cluster cluster, + 2: string location, + 3: bool primary, + 4: bool dataSynced, + 5: map parameters, +} + // namespace for tables struct Database { 1: string name, @@ -123,7 +139,8 @@ 7: SerDeInfo serdeInfo, // serialization and deserialization information 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` 9: list sortCols, // sort order of the data in each bucket - 10: map parameters // any user supplied key value hash + 10: map parameters, // any user supplied key value hash + 11: optional list clusterStorage, } // table information @@ -257,6 +274,12 @@ // delete data (including partitions) if deleteData is set to true void drop_table(1:string dbname, 2:string name, 3:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o3) + + // drops the table and all the partitions associated with it if the table has partitions + // delete data (including partitions) if deleteData is set to true + void drop_table_on_cluster(1:string cluster_name, 2:string dbname, 3:string name, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidOperationException o3) + list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) list get_all_tables(1: string db_name) throws (1: MetaException o1) @@ -319,6 +342,10 @@ throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o2) + bool 
drop_partition_on_cluster(1:string cluster_name, 2:string db_name, 3:string tbl_name, + 4:list part_vals, 5:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o2, + 3: InvalidOperationException o3) + bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) throws(1:NoSuchObjectException o1, 2:MetaException o2) Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) @@ -415,7 +442,16 @@ throws(1:NoSuchObjectException o1, 2:MetaException o2) list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) throws(1:MetaException o2) - + + //cluster + void create_cluster(1:Cluster cluster) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) + void drop_cluster(1:string cluster_name) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + Cluster get_cluster(1:string cluster_name) throws (1:MetaException o1, 2:NoSuchObjectException o2) + list list_clusters() throws (1: MetaException o1) + void alter_cluster(1:string cluster_name, 2:Cluster cluster) + throws(1:MetaException o1, 2:InvalidObjectException o2, + 3:NoSuchObjectException o3, 4:AlreadyExistsException o4) + //authorization privileges bool create_role(1:Role role) throws(1:MetaException o1) Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java =================================================================== --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1235046) +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy) @@ -136,6 +136,13 @@ HiveConf.ConfVars.SCRATCHDIR }; + public static final HiveConf.ConfVars[] clusterVars = { + HiveConf.ConfVars.HADOOPBIN, + HiveConf.ConfVars.HADOOPJT, + HiveConf.ConfVars.METASTOREWAREHOUSE, + HiveConf.ConfVars.SCRATCHDIR + }; + /** * ConfVars. 
* @@ -556,7 +563,8 @@ // Whether to delete the scratchdir while startup HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false), HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false), - HIVE_FILES_UMASK_VALUE("hive.files.umask.vlaue", 0002), + HIVE_CLUSTER_CONF("hive.cluster.conf",""), + HIVE_FILES_UMASK_VALUE("hive.files.umask.vlaue", 0002) ; public final String varname; Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (working copy) @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.ClusterStorageDescriptor; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; @@ -611,4 +612,14 @@ public void setLastAccessTime(int lastAccessTime) { tPartition.setLastAccessTime(lastAccessTime); } + + public List getClusterStorageDescriptors() { + return tPartition.getSd().getClusterStorage(); + } + + public void setClusterStorageDescriptors( + List clusterStorage) { + tPartition.getSd().setClusterStorage(clusterStorage); + } + } Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (working copy) @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.TableType; +import 
org.apache.hadoop.hive.metastore.api.ClusterStorageDescriptor; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -328,6 +329,15 @@ return outputFormatClass; } + final public List getClusterStorageDescriptors () { + return tTable.getSd().getClusterStorage(); + } + + final public void setClusterStorageDescriptors( + List clusterStorages) { + tTable.getSd().setClusterStorage(clusterStorages); + } + final public boolean isValidSpec(Map spec) throws HiveException { Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -56,6 +56,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Cluster; import org.apache.hadoop.hive.metastore.api.Constants; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -99,6 +100,8 @@ private IMetaStoreClient metaStoreClient; private String currentDatabase; + private Cluster currentCluster; + private static ThreadLocal hiveDB = new ThreadLocal() { @Override protected synchronized Object initialValue() { @@ -458,7 +461,7 @@ throw new HiveException("Unable to rename partition to the same name: old and new partition cols don't match. 
"); } List pvals = new ArrayList(); - + for (FieldSchema field : tbl.getPartCols()) { String val = oldPartSpec.get(field.getName()); if (val == null || val.length() == 0) { @@ -780,7 +783,7 @@ */ public void dropTable(String tableName) throws HiveException { Table t = newTable(tableName); - dropTable(t.getDbName(), t.getTableName(), true, true); + dropTable(t.getDbName(), t.getTableName()); } /** @@ -800,7 +803,12 @@ * thrown if the drop fails */ public void dropTable(String dbName, String tableName) throws HiveException { - dropTable(dbName, tableName, true, true); + if (getCurrentCluster() != null) { + dropTableOnCluster(getCurrentCluster().getName(), dbName, + tableName, true, true); + } else { + dropTable(dbName, tableName, true, true); + } } /** @@ -817,7 +825,8 @@ boolean ignoreUnknownTab) throws HiveException { try { - getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab); + getMSC().dropTable(dbName, tableName, deleteData, + ignoreUnknownTab); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { throw new HiveException(e); @@ -827,6 +836,30 @@ } } + /** + * Drop the table from the cluster + * + * @param clusterName + * @param dbName + * @param tableName + * @param deleteData + * @param ignoreUnknownTab + */ + public void dropTableOnCluster(String clusterName, + String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTab) throws HiveException { + try { + getMSC().dropTableOnCluster(getCurrentCluster().getName(), + dbName, tableName, deleteData, ignoreUnknownTab); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw new HiveException(e); + } + } catch (Exception e) { + throw new HiveException(e); + } + } + public HiveConf getConf() { return (conf); } @@ -1221,7 +1254,7 @@ validPartitions.add(s.getPath().getParent()); } } - + if (validPartitions.size() == 0) { LOG.warn("No partition is genereated by dynamic partitioning"); } @@ -1242,7 +1275,7 @@ Path partPath = iter.next(); assert 
fs.getFileStatus(partPath).isDir(): "partitions " + partPath + " is not a directory !"; - + // generate a full partition specification LinkedHashMap fullPartSpec = new LinkedHashMap(partSpec); Warehouse.makeSpecFromName(fullPartSpec, partPath); @@ -1480,9 +1513,28 @@ public boolean dropPartition(String tblName, List part_vals, boolean deleteData) throws HiveException { Table t = newTable(tblName); - return dropPartition(t.getDbName(), t.getTableName(), part_vals, deleteData); + if(getCurrentCluster() != null) { + return dropPartitionOnCluster(getCurrentCluster() + .getName(), t.getDbName(), t.getTableName(), part_vals, + deleteData); + } else { + return dropPartition(t.getDbName(), t.getTableName(), part_vals, deleteData); + } } + public boolean dropPartitionOnCluster(String cluster_name, + String db_name, String tbl_name, + List part_vals, boolean deleteData) throws HiveException{ + try { + return getMSC().dropPartitionWithClusterName(cluster_name, + db_name, tbl_name, part_vals, deleteData); + } catch (NoSuchObjectException e) { + throw new HiveException("Partition or table doesn't exist.", e); + } catch (Exception e) { + throw new HiveException("Unknown error. 
Please check logs.", e); + } + } + public boolean dropPartition(String db_name, String tbl_name, List part_vals, boolean deleteData) throws HiveException { try { @@ -2186,4 +2238,71 @@ private static String[] getQualifiedNames(String qualifiedName) { return qualifiedName.split("\\."); } + + public void createCluster(String name, String clusterComment, + String clusterLocation, Map clusterProps, + boolean ifNotExist) throws HiveException { + org.apache.hadoop.hive.metastore.api.Cluster cluster = + new org.apache.hadoop.hive.metastore.api.Cluster(name, + clusterComment, (int) System.currentTimeMillis(), + clusterLocation, clusterProps); + try { + getMSC().createCluster(cluster); + } catch (AlreadyExistsException e) { + if (!ifNotExist) { + throw new HiveException(e.getMessage()); + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropCluster(String name, boolean ifExist) + throws NoSuchObjectException, HiveException { + try { + getMSC().dropCluster(name); + } catch (NoSuchObjectException e) { + if (!ifExist) { + throw e; + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void setCurrentCluster(Cluster cluster) { + this.currentCluster = cluster; + } + + public Cluster getCurrentCluster() { + return this.currentCluster; + } + + public Cluster getCluster(String clusterName) + throws NoSuchObjectException, HiveException { + try{ + Cluster clusterObj = getMSC().getCluster(clusterName); + return clusterObj; + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List getClusters() throws HiveException { + try { + return getMSC().listClusters(); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void alterCluster(String name, Cluster cluster) throws HiveException { + try{ + getMSC().alter_cluster(name, cluster); + } catch (Exception e) { + throw new HiveException(e); + } + } }; Index: 
ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -57,6 +57,8 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Cluster; +import org.apache.hadoop.hive.metastore.api.ClusterStorageDescriptor; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; @@ -101,6 +103,7 @@ import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.ClusterDDLDesc; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; @@ -374,6 +377,11 @@ return mergeFiles(db, mergeFilesDesc); } + ClusterDDLDesc clusterDDLDesc = work.getClusterDesc(); + if(clusterDDLDesc != null) { + return clusterDDL(db, clusterDDLDesc); + } + } catch (InvalidTableException e) { console.printError("Table " + e.getTableName() + " does not exist"); LOG.debug(stringifyException(e)); @@ -744,6 +752,154 @@ return 0; } + private int clusterDDL(Hive db, ClusterDDLDesc clusterDDLDesc) + throws HiveException, NoSuchObjectException { + ClusterDDLDesc.ClusterOperation operation = + clusterDDLDesc.getClusterOperation(); + DataOutput outStream = null; + try { + if (operation.equals(ClusterDDLDesc.ClusterOperation.ADD_CLUSTER)) { + db.createCluster(clusterDDLDesc.getName(), + clusterDDLDesc.getClusterComment(), + clusterDDLDesc.getClusterLocation(), + 
clusterDDLDesc.getClusterProps(), + clusterDDLDesc.getIfNotExists()); + } else if ( + operation.equals(ClusterDDLDesc.ClusterOperation.DROP_CLUSTER)) { + db.dropCluster(clusterDDLDesc.getName(), + clusterDDLDesc.getIfExists()); + } else if (operation.equals(ClusterDDLDesc.ClusterOperation.SHOW_CLUSTER)) { + String tableName = clusterDDLDesc.getTableName(); + Map partSpec = clusterDDLDesc.getPartSpec(); + List clusters = null; + Partition part = null; + Table tbl = null; + List clusterSDS = null; + if(tableName != null) { + tbl = db.getTable(tableName); + if(tbl == null) { + throw new NoSuchObjectException(tableName + + " does not exist."); + } + if(partSpec != null) { + if(!tbl.isPartitioned()) { + throw new HiveException(tableName + + " is not partitioned. But partition spec is given."); + } + part = db.getPartition(tbl, partSpec, false); + if(part == null) { + throw new NoSuchObjectException("Partition " + + partSpec.toString() + " does not exist on table " + + tableName); + } + clusterSDS = part.getClusterStorageDescriptors(); + } else { + clusterSDS = tbl.getClusterStorageDescriptors(); + } + + if(clusterSDS != null) { + clusters = new ArrayList(clusterSDS.size()); + for(ClusterStorageDescriptor clusterSD : clusterSDS) { + clusters.add(clusterSD.getCluster()); + } + } + } else { + clusters = db.getClusters(); + } + if (clusters != null && clusters.size() > 0) { + Path resFile = new Path(clusterDDLDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + outStream = fs.create(resFile); + for (Cluster clusterObj : clusters) { + outStream.writeBytes(clusterObj.getName() + "\n"); + } + ((FSDataOutputStream) outStream).close(); + outStream = null; + } + + } else if (operation.equals( + ClusterDDLDesc.ClusterOperation.RENAME_CLUSTER)) { + Cluster cluster = db.getCluster(clusterDDLDesc.getName()); + if(cluster != null) { + cluster.setName(clusterDDLDesc.getNewName()); + } + db.alterCluster(clusterDDLDesc.getName(), cluster); + } else if (operation.equals(
ClusterDDLDesc.ClusterOperation.ALTER_CLUSTER_PROPS)) { + Cluster cluster = db.getCluster(clusterDDLDesc.getName()); + if(cluster != null) { + cluster.setParameters(clusterDDLDesc.getClusterProps()); + } + db.alterCluster(clusterDDLDesc.getName(), cluster); + } else if (operation + .equals(ClusterDDLDesc.ClusterOperation.SWITCH_CLUSTER)) { + switchCluster(db, clusterDDLDesc); + } else { + throw new HiveException("Unknown cluster operation " + + operation.getOperationName()); + } + } catch (IOException e) { + LOG.info("cluster ddl exception: " + stringifyException(e)); + return 1; + } catch (HiveException e) { + console.printError("Error in cluster operation " + + operation.getOperationName() + " on cluster name " + + clusterDDLDesc.getName() + ", error message " + + e.getMessage()); + return 1; + } finally { + IOUtils.closeStream((FSDataOutputStream) outStream); + } + + return 0; + } + + /** + * Switch to a different Cluster + * @param db + * @param clusterDesc + * @throws HiveException + */ + private void switchCluster(Hive db, ClusterDDLDesc clusterDesc) + throws HiveException { + String clusterName = clusterDesc.getName(); + Cluster cluster = null; + try { + cluster = db.getCluster(clusterName); + } catch (NoSuchObjectException e) { + throw new HiveException("ERROR: The cluster " + clusterName + + " does not exist."); + } + + db.setCurrentCluster(cluster); + + Map clusterParams = cluster.getParameters(); + if (clusterParams != null) { + for (HiveConf.ConfVars var : HiveConf.clusterVars) { + String newValue = clusterParams.get(var.varname); + if (newValue != null) { + LOG.info("Changing " + var.varname + " from " + + conf.getVar(var) + " to " + newValue); + conf.setVar(var, newValue); + } + } + + String confVals = + clusterParams.get(HiveConf.ConfVars.HIVE_CLUSTER_CONF.varname); + if (confVals != null) { + String[] confSplit = confVals.split(","); + for (String oneConf : confSplit) { + String[] kv = oneConf.split(":"); + if (kv.length != 2) { + LOG.info("Ignoring conf
setting " + oneConf); + continue; + } + conf.set(kv[0], kv[1]); + } + } + } + } + private int roleDDL(RoleDDLDesc roleDDLDesc) { RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation(); DataOutput outStream = null; @@ -3183,7 +3339,9 @@ // drop the table db.dropTable(dropTbl.getTableName()); - if (tbl != null) { + if (tbl != null + && isOnObjectPrimaryCluster(tbl + .getClusterStorageDescriptors())) { work.getOutputs().add(new WriteEntity(tbl)); } } else { @@ -3229,13 +3387,38 @@ for (Partition partition : partsToDelete) { console.printInfo("Dropping the partition " + partition.getName()); db.dropPartition(dropTbl.getTableName(), partition.getValues(), true); - work.getOutputs().add(new WriteEntity(partition)); + if (isOnObjectPrimaryCluster(partition.getClusterStorageDescriptors())) { + work.getOutputs().add(new WriteEntity(partition)); + } } } return 0; } + public boolean isOnObjectPrimaryCluster( + List clusterStorageDescriptors) + throws HiveException { + if (db.getCurrentCluster() == null) { + if(clusterStorageDescriptors != null && !clusterStorageDescriptors.isEmpty()) { + throw new HiveException("Current cluster is not set"); + } + // no current cluster, and the hive object does not have a list of cluster + // storages + return true; + } + + for (ClusterStorageDescriptor clusterSD : clusterStorageDescriptors) { + if (clusterSD.isPrimary() + && clusterSD.getCluster().getName().equals( + db.getCurrentCluster().getName())) { + return true; + } + } + + return false; + } + /** * Update last_modified_by and last_modified_time parameters in parameter map. 
* Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -60,6 +60,7 @@ private ShowIndexesDesc showIndexesDesc; private DescDatabaseDesc descDbDesc; private AlterDatabaseDesc alterDbDesc; + private ClusterDDLDesc clusterDesc; private RoleDDLDesc roleDDLDesc; private GrantDesc grantDesc; @@ -400,6 +401,16 @@ this.mergeFilesDesc = mergeDesc; } + public DDLWork(HashSet inputs, + HashSet outputs, ClusterDDLDesc clusterDesc) { + this(inputs, outputs); + this.clusterDesc = clusterDesc; + } + + public DDLWork(ClusterDDLDesc clusterDesc) { + this.clusterDesc = clusterDesc; + } + /** * @return Create Database descriptor */ @@ -798,6 +809,21 @@ this.descFunctionDesc = descFunctionDesc; } + /** + * @return cluster ddl desc + */ + @Explain(displayName = "Cluster DDL Operator") + public ClusterDDLDesc getClusterDesc() { + return clusterDesc; + } + + /** + * @param clusterDesc + */ + public void setClusterDesc(ClusterDDLDesc clusterDesc) { + this.clusterDesc = clusterDesc; + } + public HashSet getInputs() { return inputs; } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ClusterDDLDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ClusterDDLDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ClusterDDLDesc.java (revision 0) @@ -0,0 +1,300 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "cluster ddl") +public class ClusterDDLDesc { + + public static enum ClusterOperation { + DROP_CLUSTER("drop_cluster"), ADD_CLUSTER("add_cluster"), + SHOW_CLUSTER("show_cluster"), DESC_CLUSTER("describe_cluster"), + SWITCH_CLUSTER("switch_cluster"), RENAME_CLUSTER 
("rename_cluster"), + ALTER_CLUSTER_PROPS ("alter_cluster_props"); + + private String operationName; + + private ClusterOperation() { + } + + private ClusterOperation(String operationName) { + this.operationName = operationName; + } + + /** + * @return operation name + */ + @Explain(displayName = "operation name") + public String getOperationName() { + return operationName; + } + + public String toString() { + return this.operationName; + } + + } + + //used by add cluster + private boolean ifNotExists; + + //used by drop cluster + private boolean ifExists; + + private String resFile; + + private String name; + private String newName; + private ClusterOperation clusterOperation; + private Map clusterProps; + private String clusterLocation; + private String clusterComment; + private HashMap partSpec; + private String tableName; + + private boolean formatted = false; + private boolean extended = false; + + public static ClusterDDLDesc createDescForAddNewCluster( + String clusterName) { + return new ClusterDDLDesc(clusterName, + ClusterOperation.ADD_CLUSTER); + } + + public static ClusterDDLDesc createDescForDropCluster( + String clusterName, boolean ifExists) { + ClusterDDLDesc ddlDesc = + new ClusterDDLDesc(clusterName, ClusterOperation.DROP_CLUSTER); + ddlDesc.ifExists = ifExists; + return ddlDesc; + } + + public static ClusterDDLDesc createDescForSwitchCluster( + String clusterName) { + return new ClusterDDLDesc(clusterName, + ClusterOperation.SWITCH_CLUSTER); + } + + public static ClusterDDLDesc createDescForDescribeCluster( + String clusterName, String resFile) { + ClusterDDLDesc ddlDesc = + new ClusterDDLDesc(clusterName, ClusterOperation.DESC_CLUSTER); + ddlDesc.resFile = resFile; + return ddlDesc; + } + + public static ClusterDDLDesc createDescForAlterClusterRename( + String oldClusterName, String newClusterName) { + ClusterDDLDesc desc = new ClusterDDLDesc(); + desc.name = oldClusterName; + desc.newName = newClusterName; + desc.clusterOperation = 
ClusterOperation.RENAME_CLUSTER; + return desc; + } + + public static ClusterDDLDesc createDescForAlterClusterProps( + String clusterName, Map newProperties) { + ClusterDDLDesc desc = new ClusterDDLDesc(); + desc.name = clusterName; + desc.clusterProps = newProperties; + desc.clusterOperation = ClusterOperation.ALTER_CLUSTER_PROPS; + return desc; + } + + public static ClusterDDLDesc createDescForShowClusters( + String tableName, HashMap partSpec) { + ClusterDDLDesc desc = new ClusterDDLDesc(); + desc.tableName = tableName; + desc.partSpec = partSpec; + desc.clusterOperation = ClusterOperation.SHOW_CLUSTER; + return desc; + } + + public static ClusterDDLDesc createDescForDescribeCluster( + String clusterName) { + ClusterDDLDesc desc = new ClusterDDLDesc(); + desc.name = clusterName; + desc.clusterOperation = ClusterOperation.DESC_CLUSTER; + return desc; + } + + public ClusterDDLDesc() { + } + + private ClusterDDLDesc(String clusterName, ClusterOperation op) { + this.name = clusterName; + this.clusterOperation = op; + } + + public ClusterDDLDesc(String clusterName, + ClusterOperation clusterOp, boolean ifNotExists, + String clusterComment, Map clusterProps, + String clusterLocation) { + this.name = clusterName; + this.clusterOperation = clusterOp; + this.ifNotExists = ifNotExists; + this.clusterComment = clusterComment; + this.clusterProps = clusterProps; + this.clusterLocation = clusterLocation; + } + + /** + * @return cluster name + */ + @Explain(displayName = "name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * @return new cluster name for rename + */ + @Explain(displayName = "new name") + public String getNewName() { + return newName; + } + + public void setNewName(String newName) { + this.newName = newName; + } + + /** + * @return cluster properties + */ + public HashMap getPartSpec() { + return partSpec; + } + + public void setPartSpec(HashMap partSpec) { + this.partSpec = partSpec; + 
} + + /** + * @return table name + */ + @Explain(displayName = "table name") + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return cluster operation + */ + @Explain(displayName = "cluster operation") + public ClusterOperation getClusterOperation() { + return clusterOperation; + } + + /** + * @param clusterOperation + */ + public void setClusterOperation(ClusterOperation clusterOperation) { + this.clusterOperation = clusterOperation; + } + + /** + * @return add the cluster operation if not exists + */ + @Explain(displayName="If not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + /** + * @param ifNotExists + */ + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + /** + * @return if exists + */ + public boolean getIfExists() { + return ifExists; + } + + public void setIfExists(boolean ifExists) { + this.ifExists = ifExists; + } + + /** + * @return cluster properties + */ + @Explain(displayName="Cluster properties") + public Map getClusterProps() { + return clusterProps; + } + + /** + * @param clusterProps + */ + public void setClusterProps(Map clusterProps) { + this.clusterProps = clusterProps; + } + + /** + * @return cluster location + */ + @Explain(displayName="Cluster location") + public String getClusterLocation() { + return clusterLocation; + } + + /** + * @param clusterLocation + */ + public void setClusterLocation(String clusterLocation) { + this.clusterLocation = clusterLocation; + } + + /** + * @return cluster comment + */ + @Explain(displayName = "Cluster comment") + public String getClusterComment() { + return clusterComment; + } + + /** + * @param clusterComment + */ + public void setClusterComment(String clusterComment) { + this.clusterComment = clusterComment; + } + + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = 
resFile; + } + + public boolean getFormatted() { + return formatted; + } + + public void setFormatted(boolean formatted) { + this.formatted = formatted; + } + + public boolean getExtended() { + return extended; + } + + public void setExtended(boolean extended) { + this.extended = extended; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -138,8 +138,19 @@ TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; TOK_ALTERINDEX_REBUILD; TOK_ALTERINDEX_PROPERTIES; +TOK_ALTERCLUSTER_RENAME; +TOK_ALTERCLUSTER_PROPERTIES; TOK_MSCK; TOK_SHOWDATABASES; +TOK_CREATECLUSTER; +TOK_SWITCHCLUSTER; +TOK_DROPCLUSTER; +TOK_DESCCLUSTER; +TOK_CLUSTERPROPERTIES; +TOK_CLUSTERPROPLIST; +TOK_CLUSTERLOCATION; +TOK_CLUSTERCOMMENT; +TOK_SHOWCLUSTERS; TOK_SHOWTABLES; TOK_SHOWFUNCTIONS; TOK_SHOWPARTITIONS; @@ -327,6 +338,9 @@ : createDatabaseStatement | switchDatabaseStatement | dropDatabaseStatement + | createClusterStatement + | switchClusterStatement + | dropClusterStatement | createTableStatement | dropTableStatement | alterStatement @@ -395,6 +409,13 @@ -> ^(TOK_CREATEDATABASE $name ifNotExists? dbLocation? databaseComment? $dbprops?) ; +databaseComment +@init { msgs.push("database's comment"); } +@after { msgs.pop(); } + : KW_COMMENT comment=StringLiteral + -> ^(TOK_DATABASECOMMENT $comment) + ; + dbLocation @init { msgs.push("database location specification"); } @after { msgs.pop(); } @@ -416,7 +437,6 @@ keyValueProperty (COMMA keyValueProperty)* -> ^(TOK_DBPROPLIST keyValueProperty+) ; - switchDatabaseStatement @init { msgs.push("switch database statement"); } @after { msgs.pop(); } @@ -431,13 +451,59 @@ -> ^(TOK_DROPDATABASE Identifier ifExists? restrictOrCascade?) 
; -databaseComment +createClusterStatement +@init { msgs.push("create cluster statement"); } +@after { msgs.pop(); } + : KW_CREATE (KW_CLUSTER) + ifNotExists? + name=Identifier + clusterComment? + clusterLocation? + (KW_WITH KW_CLUSTERPROPERTIES clusterprops=clusterProperties)? + -> ^(TOK_CREATECLUSTER $name ifNotExists? clusterLocation? clusterComment? $clusterprops?) + ; + +clusterLocation +@init { msgs.push("database location specification"); } +@after { msgs.pop(); } + :KW_LOCATION loc=StringLiteral -> ^(TOK_CLUSTERLOCATION $loc) + ; + +clusterComment @init { msgs.push("database's comment"); } @after { msgs.pop(); } : KW_COMMENT comment=StringLiteral - -> ^(TOK_DATABASECOMMENT $comment) + -> ^(TOK_CLUSTERCOMMENT $comment) ; +clusterProperties +@init { msgs.push("clusterproperties"); } +@after { msgs.pop(); } + : + LPAREN clusterPropertiesList RPAREN -> ^(TOK_CLUSTERPROPERTIES clusterPropertiesList) + ; + +clusterPropertiesList +@init { msgs.push("cluster properties list"); } +@after { msgs.pop(); } + : + keyValueProperty (COMMA keyValueProperty)* -> ^(TOK_CLUSTERPROPLIST keyValueProperty+) + ; + +switchClusterStatement +@init { msgs.push("switch cluster statement"); } +@after { msgs.pop(); } + : KW_USE KW_CLUSTER Identifier + -> ^(TOK_SWITCHCLUSTER Identifier) + ; + +dropClusterStatement +@init { msgs.push("drop cluster statement"); } +@after { msgs.pop(); } + :KW_DROP KW_CLUSTER ifExists? Identifier + ->^(TOK_DROPCLUSTER Identifier ifExists?) + ; + createTableStatement @init { msgs.push("create table statement"); } @after { msgs.pop(); } @@ -560,6 +626,7 @@ KW_INDEX! alterIndexStatementSuffix | KW_DATABASE! alterDatabaseStatementSuffix + | KW_CLUSTER! 
alterClusterStatementSuffix ) ; @@ -607,6 +674,27 @@ ) ; +alterClusterStatementSuffix +@init { msgs.push("alter cluster statement"); } +@after { msgs.pop(); } + : alterClusterStatementSuffixProperties + | alterClusterStatementSuffixRename + ; + +alterClusterStatementSuffixRename +@init { msgs.push("rename cluster statement"); } +@after { msgs.pop(); } + : oldName=Identifier KW_RENAME KW_TO newName=Identifier + -> ^(TOK_ALTERCLUSTER_RENAME $oldName $newName) + ; + +alterClusterStatementSuffixProperties +@init { msgs.push("alter cluster properties statement"); } +@after {msgs.pop();} + : name=Identifier KW_SET KW_CLUSTERPROPERTIES clusterProperties + -> ^(TOK_ALTERCLUSTER_PROPERTIES $name clusterProperties) + ; + alterDatabaseStatementSuffix @init { msgs.push("alter database statement"); } @after { msgs.pop(); } @@ -829,6 +917,8 @@ : (KW_DESCRIBE|KW_DESC) (descOptions=KW_FORMATTED|descOptions=KW_EXTENDED)? (parttype=partTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions?) | (KW_DESCRIBE|KW_DESC) KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?) | (KW_DESCRIBE|KW_DESC) KW_DATABASE KW_EXTENDED? (dbName=Identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?) + | (KW_DESCRIBE|KW_DESC) KW_CLUSTER (descOptions=KW_FORMATTED|descOptions=KW_EXTENDED)? (clusterName=Identifier) + -> ^(TOK_DESCCLUSTER $clusterName $descOptions?) ; analyzeStatement @@ -841,6 +931,7 @@ @init { msgs.push("show statement"); } @after { msgs.pop(); } : KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?) + | KW_SHOW KW_CLUSTERS (KW_ON tablePartitionPrefix)? -> ^(TOK_SHOWCLUSTERS tablePartitionPrefix?) | KW_SHOW KW_TABLES ((KW_FROM|KW_IN) db_name=Identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWTABLES (TOK_FROM $db_name)? showStmtIdentifier?) | KW_SHOW KW_FUNCTIONS showStmtIdentifier? -> ^(TOK_SHOWFUNCTIONS showStmtIdentifier?) | KW_SHOW KW_PARTITIONS Identifier partitionSpec? 
-> ^(TOK_SHOWPARTITIONS Identifier partitionSpec?) @@ -2276,6 +2367,7 @@ KW_SET: 'SET'; KW_TBLPROPERTIES: 'TBLPROPERTIES'; KW_IDXPROPERTIES: 'IDXPROPERTIES'; +KW_CLUSTERPROPERTIES: 'CLUSTERPROPERTIES'; KW_VALUE_TYPE: '$VALUE$'; KW_ELEM_TYPE: '$ELEM$'; KW_CASE: 'CASE'; @@ -2299,6 +2391,7 @@ KW_IN: 'IN'; KW_DATABASE: 'DATABASE'; KW_DATABASES: 'DATABASES'; +KW_CLUSTERS: 'CLUSTERS'; KW_MATERIALIZED: 'MATERIALIZED'; KW_SCHEMA: 'SCHEMA'; KW_SCHEMAS: 'SCHEMAS'; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1235046) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -75,6 +75,7 @@ import org.apache.hadoop.hive.ql.plan.AlterTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; +import org.apache.hadoop.hive.ql.plan.ClusterDDLDesc; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; @@ -337,6 +338,27 @@ case HiveParser.TOK_ALTERDATABASE_PROPERTIES: analyzeAlterDatabase(ast); break; + case HiveParser.TOK_CREATECLUSTER: + analyzeCreateCluster(ast); + break; + case HiveParser.TOK_SWITCHCLUSTER: + analyzeSwitchCluster(ast); + break; + case HiveParser.TOK_DROPCLUSTER: + analyzeDropCluster(ast); + break; + case HiveParser.TOK_SHOWCLUSTERS: + analyzeShowClusters(ast); + break; + case HiveParser.TOK_DESCCLUSTER: + analyzeDescCluster(ast); + break; + case HiveParser.TOK_ALTERCLUSTER_PROPERTIES: + analyzeAlterClusterProps(ast); + break; + case HiveParser.TOK_ALTERCLUSTER_RENAME: + analyzeAlterClusterRename(ast); + break; case HiveParser.TOK_CREATEROLE: analyzeCreateRole(ast); break; @@ -368,6 +390,123 @@ } } + private void analyzeDropCluster(ASTNode ast) { + String clusterName 
= + unescapeIdentifier(ast.getChild(0).getText()); + boolean ifExists = (ast.getFirstChildWithType(TOK_IFEXISTS) != null); + ClusterDDLDesc dropClusterDesc = + ClusterDDLDesc.createDescForDropCluster(clusterName, ifExists); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), dropClusterDesc), conf)); + } + + private void analyzeSwitchCluster(ASTNode ast) { + String clusterName = + unescapeIdentifier(ast.getChild(0).getText()); + ClusterDDLDesc switchClusterDesc = + ClusterDDLDesc.createDescForSwitchCluster(clusterName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), switchClusterDesc), conf)); + } + + private void analyzeCreateCluster(ASTNode ast) + throws SemanticException { + String clusterName = + unescapeIdentifier(ast.getChild(0).getText()); + + String clusterComment = null; + String clusterSchema = null; + Map clusterProps = null; + boolean ifNotExists = false; + String clusterLocation = null; + + for (int i = 1; i < ast.getChildCount(); i++) { + ASTNode childNode = (ASTNode) ast.getChild(i); + switch (childNode.getToken().getType()) { + case HiveParser.TOK_IFNOTEXISTS: + ifNotExists = true; + break; + case HiveParser.TOK_CLUSTERCOMMENT: + clusterComment = unescapeSQLString(childNode.getChild(0).getText()); + break; + case HiveParser.TOK_CLUSTERPROPERTIES: + clusterProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0)); + break; + case HiveParser.TOK_CLUSTERLOCATION: + clusterLocation = unescapeSQLString(childNode.getChild(0).getText()); + break; + default: + throw new SemanticException("Unrecognized token in CREATE CLUSTER statement"); + } + } + + ClusterDDLDesc addClusterDesc = + new ClusterDDLDesc(clusterName, + ClusterDDLDesc.ClusterOperation.ADD_CLUSTER, ifNotExists, + clusterComment, clusterProps, clusterLocation); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + addClusterDesc), conf)); + } + + private void analyzeAlterClusterRename(ASTNode ast) { + String clusterName 
= getUnescapedName((ASTNode) ast.getChild(0)); + String newClusterName = + getUnescapedName((ASTNode) ast.getChild(1)); + ClusterDDLDesc renameClusterDesc = + ClusterDDLDesc.createDescForAlterClusterRename(clusterName, + newClusterName); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), renameClusterDesc), conf)); + } + + private void analyzeAlterClusterProps(ASTNode ast) { + String clusterName = getUnescapedName((ASTNode)ast.getChild(0)); + HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) + .getChild(0)); + ClusterDDLDesc alterClusterProps = + ClusterDDLDesc.createDescForAlterClusterProps(clusterName, + mapProp); + rootTasks.add(TaskFactory.get(new DDLWork(alterClusterProps), conf)); + } + + private void analyzeDescCluster(ASTNode ast) { + + String clusterName = getUnescapedName((ASTNode)ast.getChild(0)); + + ClusterDDLDesc clusterDDLDesc = + ClusterDDLDesc.createDescForDescribeCluster(clusterName, ctx + .getResFile().toString()); + if (ast.getChildCount() == 2) { + int descOptions = ast.getChild(1).getType(); + clusterDDLDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED); + clusterDDLDesc.setExtended(descOptions == HiveParser.KW_EXTENDED); + } + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + clusterDDLDesc), conf)); + setFetchTask(createFetchTask(DescTableDesc.getSchema())); + } + + private void analyzeShowClusters(ASTNode ast) + throws SemanticException { + ClusterDDLDesc clusterDDLDesc = null; + String tableName = null; + HashMap partSpec = null; + if (ast.getChildCount() > 0) { + ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); + tableName = + getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0)); + // get partition metadata if partition specified + if (tableTypeExpr.getChildCount() == 2) { + ASTNode partspec = (ASTNode) tableTypeExpr.getChild(1); + partSpec = getPartSpec(partspec); + } + } + clusterDDLDesc = + ClusterDDLDesc.createDescForShowClusters(tableName, partSpec); + 
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), + getOutputs(), clusterDDLDesc), conf)); + } + private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) { List principalDesc = analyzePrincipalListDef( (ASTNode) ast.getChild(0));