diff --git a/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java b/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java index 7fffe13..8b1c4fe 100644 --- a/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java +++ b/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java @@ -149,6 +149,8 @@ public boolean accept(File filePath) { private String cleanupScript; + private String useHBaseMetastore; + public void setHadoopVersion(String ver) { this.hadoopVersion = ver; } @@ -221,6 +223,14 @@ public void setCleanupScript(String cleanupScript) { this.cleanupScript = cleanupScript; } + public String getUseHBaseMetastore() { + return useHBaseMetastore; + } + + public void setUseHBaseMetastore(String useHBaseMetastore) { + this.useHBaseMetastore = useHBaseMetastore; + } + public void setHiveRootDirectory(File hiveRootDirectory) { try { this.hiveRootDirectory = hiveRootDirectory.getCanonicalPath(); @@ -530,6 +540,7 @@ public void execute() throws BuildException { ctx.put("hadoopVersion", hadoopVersion); ctx.put("initScript", initScript); ctx.put("cleanupScript", cleanupScript); + ctx.put("useHBaseMetastore", useHBaseMetastore); File outFile = new File(outDir, className + ".java"); FileWriter writer = new FileWriter(outFile); diff --git a/bin/ext/hbaseimport.cmd b/bin/ext/hbaseimport.cmd new file mode 100644 index 0000000..ff69007 --- /dev/null +++ b/bin/ext/hbaseimport.cmd @@ -0,0 +1,35 @@ +@echo off +@rem Licensed to the Apache Software Foundation (ASF) under one or more +@rem contributor license agreements. See the NOTICE file distributed with +@rem this work for additional information regarding copyright ownership. +@rem The ASF licenses this file to You under the Apache License, Version 2.0 +@rem (the "License"); you may not use this file except in compliance with +@rem the License. You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +set CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseImport +set HIVE_OPTS= +set HADOOP_CLASSPATH= + +pushd %HIVE_LIB% +for /f %%a IN ('dir /b hive-exec-*.jar') do ( + set JAR=%HIVE_LIB%\%%a +) +popd + +if [%1]==[hbaseimport_help] goto :hbaseimport_help + +:hbaseimport + call %HIVE_BIN_PATH%\ext\util\execHiveCmd.cmd %CLASS% +goto :EOF + +:hbaseimport_help + echo "usage hive --hbaseimport" +goto :EOF diff --git a/bin/ext/hbaseimport.sh b/bin/ext/hbaseimport.sh new file mode 100644 index 0000000..638cdcf --- /dev/null +++ b/bin/ext/hbaseimport.sh @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +THISSERVICE=hbaseimport +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +hbaseimport () { + CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseImport + HIVE_OPTS='' + execHiveCmd $CLASS "$@" +} + +hbaseimport_help () { + echo "usage ./hive hbaseimport" +} diff --git a/bin/ext/hbaseschematool.sh b/bin/ext/hbaseschematool.sh new file mode 100644 index 0000000..4d4570a --- /dev/null +++ b/bin/ext/hbaseschematool.sh @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +THISSERVICE=hbaseschematool +export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} " + +hbaseschematool () { + CLASS=org.apache.hadoop.hive.metastore.hbase.HBaseSchemaTool + HIVE_OPTS='' + execHiveCmd $CLASS "$@" +} + +hbaseschematool_help () { + echo "usage ./hive hbaseschematool [-d ] " +} diff --git a/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java b/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java index 4fd7020..3a37207 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java +++ b/common/src/java/org/apache/hadoop/hive/common/ObjectPair.java @@ -75,6 +75,11 @@ public boolean equals(ObjectPair that) { this.getSecond().equals(that.getSecond()); } + @Override + public int hashCode() { + return first.hashCode() * 31 + second.hashCode(); + } + public String toString() { return first + ":" + second; } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 8a00079..401894a 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -18,25 +18,7 @@ package org.apache.hadoop.hive.conf; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Properties; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import javax.security.auth.login.LoginException; - +import com.google.common.base.Joiner; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -54,7 +36,23 @@ import org.apache.hadoop.util.Shell; import org.apache.hive.common.HiveCompat; -import com.google.common.base.Joiner; +import javax.security.auth.login.LoginException; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import 
java.io.PrintStream; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Hive Configuration. @@ -389,6 +387,48 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { METASTOREURIS("hive.metastore.uris", "", "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."), + METASTORE_FASTPATH("hive.metastore.fastpath", false, + "Used to avoid all of the proxies and object copies in the metastore. Note, if this is " + + "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " + + "undefined and most likely undesired behavior will result"), + METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " + + "objects we will place in the hbase metastore catalog cache. The objects will be divided up by " + + "types that we need to cache."), + METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000, + "Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."), + METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000, + "Maximum number of partitions that are aggregated per cache node."), + METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability", + (float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."), + METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1, + "Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."), + METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS), + "Number of seconds for a cached node to be active in the cache before they become stale."), + METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS), + "Number of milliseconds a writer will wait to acquire the writelock before giving up."), + METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS), + "Number of milliseconds a reader will wait to acquire the readlock before giving up."), + METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9, + "Maximum cache full % after which the cache cleaner thread kicks in."), + METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8, + "The cleaner thread cleans until cache reaches this % full size."), + METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class", + "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection", + "Class used to connection to HBase"), + METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries", + 10000, "How many in stats objects to cache in memory"), + METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s", + new TimeValidator(TimeUnit.SECONDS), + "Number of seconds stats objects live in memory after they are read 
from HBase."), + METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY( + "hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s", + new TimeValidator(TimeUnit.SECONDS), + "How often the stats cache scans its HBase entries and looks for expired entries"), + METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s", + new TimeValidator(TimeUnit.SECONDS), + "Number of seconds stats entries live in HBase cache after they are created. They may be" + + " invalided by updates or partition drops before this. Default is one week."), + METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, "Number of retries while opening a connection to metastore"), METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1, diff --git a/common/src/java/org/apache/hive/common/util/BloomFilter.java b/common/src/java/org/apache/hive/common/util/BloomFilter.java index 656ba8a..d894241 100644 --- a/common/src/java/org/apache/hive/common/util/BloomFilter.java +++ b/common/src/java/org/apache/hive/common/util/BloomFilter.java @@ -18,9 +18,10 @@ package org.apache.hive.common.util; -import static com.google.common.base.Preconditions.checkArgument; - import java.util.Arrays; +import java.util.List; + +import static com.google.common.base.Preconditions.checkArgument; /** * BloomFilter is a probabilistic data structure for set membership check. BloomFilters are @@ -63,6 +64,21 @@ public BloomFilter(long expectedEntries, double fpp) { this.bitSet = new BitSet(numBits); } + /** + * A constructor to support rebuilding the BloomFilter from a serialized representation. + * @param bits + * @param numBits + * @param numFuncs + */ + public BloomFilter(List bits, int numBits, int numFuncs) { + super(); + long[] copied = new long[bits.size()]; + for (int i = 0; i < bits.size(); i++) copied[i] = bits.get(i); + bitSet = new BitSet(copied); + this.numBits = numBits; + numHashFunctions = numFuncs; + } + static int optimalNumOfHashFunctions(long n, long m) { return Math.max(1, (int) Math.round((double) m / n * Math.log(2))); } diff --git a/data/conf/tez/hive-site.xml b/data/conf/tez/hive-site.xml index b4abe90..9897918 100644 --- a/data/conf/tez/hive-site.xml +++ b/data/conf/tez/hive-site.xml @@ -263,4 +263,14 @@ -Dlog4j.configurationFile=tez-container-log4j2.xml -Dtez.container.log.level=INFO -Dtez.container.root.logger=CLA + + hive.metastore.fastpath + true + + + + hive.metastore.rawstore.impl + org.apache.hadoop.hive.metastore.hbase.HBaseStore + + diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml index 508ed31..5295840 100644 --- a/itests/hive-unit/pom.xml +++ b/itests/hive-unit/pom.xml @@ -173,6 +173,20 @@ hadoop-1 + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + + **/metastore/hbase/** + + + + + org.apache.hadoop @@ -285,6 +299,27 @@ test + org.apache.hbase + hbase-server + ${hbase.hadoop2.version} + test-jar + test + + + org.apache.hbase + hbase-hadoop-compat + ${hbase.hadoop2.version} + test-jar + test + + + org.apache.hbase + hbase-hadoop2-compat + ${hbase.hadoop2.version} + test-jar + test + + org.apache.hadoop hadoop-minicluster test diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java index c5e70f9..e9dabee 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java @@ -38,8 +38,8 @@ 
public void testCreateAdminNAddUser() throws IOException, Throwable { Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN); assertTrue(adminRole.getOwnerName().equals(HiveMetaStore.ADMIN)); assertEquals(rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN, PrincipalType.ROLE) - .get(0).getPrivilege(),"All"); - assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0).getRole(). + .get(0).getGrantInfo().getPrivilege(),"All"); + assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0). getRoleName(),HiveMetaStore.ADMIN); } } \ No newline at end of file diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 160667d..06061c0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -2541,6 +2541,9 @@ public void testSimpleFunction() throws Exception { try { cleanUp(dbName, null, null); + for (Function f : client.getAllFunctions().getFunctions()) { + client.dropFunction(f.getDbName(), f.getFunctionName()); + } createDb(dbName); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java new file mode 100644 index 0000000..5b82579 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import co.cask.tephra.hbase10.coprocessor.TransactionProcessor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; +import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +import org.apache.hadoop.hive.ql.session.SessionState; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Integration tests with HBase Mini-cluster for HBaseStore + */ +public class HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(HBaseIntegrationTests.class.getName()); + + protected static HBaseTestingUtility utility; + protected static HBaseAdmin admin; + protected static Map emptyParameters = new HashMap<>(); + protected static HiveConf conf; + + protected HBaseStore store; + protected Driver driver; + + protected static void startMiniCluster() throws Exception { + String connectionClassName = + System.getProperty(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS.varname); + boolean testingTephra = + connectionClassName != null && connectionClassName.equals(TephraHBaseConnection.class.getName()); + if (testingTephra) { + LOG.info("Testing with Tephra"); + } + Configuration hbaseConf = HBaseConfiguration.create(); + hbaseConf.setInt("hbase.master.info.port", -1); + utility = new HBaseTestingUtility(hbaseConf); + utility.startMiniCluster(); + conf = new HiveConf(utility.getConfiguration(), HBaseIntegrationTests.class); + admin = utility.getHBaseAdmin(); + HBaseStoreTestUtil.initHBaseMetastore(admin, null); + } + + protected static void shutdownMiniCluster() throws Exception { + utility.shutdownMiniCluster(); + } + + protected void setupConnection() throws IOException { + + } + + protected void setupDriver() { + // This chicanery is necessary to make the driver work. Hive tests need the pfile file + // system, while the hbase one uses something else. So first make sure we've configured our + // hbase connection, then get a new config file and populate it as desired. 
+ HBaseReadWrite.getInstance(conf); + conf = new HiveConf(); + conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, + "org.apache.hadoop.hive.metastore.hbase.HBaseStore"); + conf.setBoolVar(HiveConf.ConfVars.METASTORE_FASTPATH, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + // Setup so we can test SQL standard auth + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE, true); + conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, + SQLStdHiveAuthorizerFactoryForTest.class.getName()); + conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, + SessionStateConfigUserAuthenticator.class.getName()); + conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true); + conf.setVar(HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, System.getProperty("user.name")); + //HBaseReadWrite.setTestConnection(hconn); + + SessionState.start(new CliSessionState(conf)); + driver = new Driver(conf); + } + + protected void setupHBaseStore() { + // Turn off caching, as we want to test actual interaction with HBase + conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + store = new HBaseStore(); + store.setConf(conf); + } + +} + diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java new file mode 100644 index 0000000..899fee1 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java @@ -0,0 +1,691 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/** + * Integration tests with HBase Mini-cluster for HBaseStore + */ +public class TestHBaseAggrStatsCacheIntegration extends HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(TestHBaseStoreIntegration.class.getName()); + + @Rule public ExpectedException thrown = ExpectedException.none(); + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + } + + @AfterClass + public static void shutdown() throws Exception { + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException { + setupConnection(); + setupHBaseStore(); + store.backdoor().getStatsCache().resetCounters(); + } + + private static interface Checker { + void checkStats(AggrStats aggrStats) throws Exception; + } + + @Test + public void hit() throws Exception { + String dbName = "default"; + String tableName = "hit"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + cols.add(new FieldSchema("col2", "varchar", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + for (List partVals : Arrays.asList(partVals1, partVals2)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new 
ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName("col2"); + obj.setColType("varchar"); + data = new ColumnStatisticsData(); + StringColumnStatsData scsd = new StringColumnStatsData(); + scsd.setAvgColLen(10.3); + scsd.setMaxColLen(2000); + scsd.setNumNulls(3); + scsd.setNumDVs(12342); + data.setStringStats(scsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(2, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(20, bcsd.getNumFalses()); + Assert.assertEquals(40, bcsd.getNumTrues()); + Assert.assertEquals(60, bcsd.getNumNulls()); + + cso = aggrStats.getColStats().get(1); + Assert.assertEquals("col2", cso.getColName()); + Assert.assertEquals("varchar", cso.getColType()); + StringColumnStatsData scsd = cso.getStatsData().getStringStats(); + Assert.assertEquals(10.3, scsd.getAvgColLen(), 0.1); + Assert.assertEquals(2000, scsd.getMaxColLen()); + Assert.assertEquals(6, scsd.getNumNulls()); + Assert.assertEquals(12342, scsd.getNumDVs()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. 
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + store.backdoor().getStatsCache().flushMemory(); + // Call again, this time it should come from hbase + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1", "col2")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(2, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + } + + @Test + public void someWithStats() throws Exception { + String dbName = "default"; + String tableName = "psws"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "long", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + boolean first = true; + for (List partVals : Arrays.asList(partVals1, partVals2)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/psws/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + if (first) { + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("long"); + ColumnStatisticsData data = new ColumnStatisticsData(); + LongColumnStatsData lcsd = new LongColumnStatsData(); + lcsd.setHighValue(192L); + lcsd.setLowValue(-20L); + lcsd.setNumNulls(30); + lcsd.setNumDVs(32); + data.setLongStats(lcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + first = false; + } + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(1, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("long", cso.getColType()); + LongColumnStatsData lcsd = cso.getStatsData().getLongStats(); + Assert.assertEquals(192L, lcsd.getHighValue()); + Assert.assertEquals(-20L, lcsd.getLowValue()); + Assert.assertEquals(30, lcsd.getNumNulls()); + Assert.assertEquals(32, lcsd.getNumDVs()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", 
"ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + store.backdoor().getStatsCache().flushMemory(); + // Call again, this time it should come from hbase + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(1, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + } + + @Test + public void invalidation() throws Exception { + try { + String dbName = "default"; + String tableName = "invalidation"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + List partVals3 = Arrays.asList("tomorrow"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = 
aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(20, bcsd.getNumFalses()); + Assert.assertEquals(40, bcsd.getNumTrues()); + Assert.assertEquals(60, bcsd.getNumNulls()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Now call a different combination to get it in memory too + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // wake the invalidator and check again to make sure it isn't too aggressive about + // removing our stuff. 
+ store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(5, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // Update statistics for 'tomorrow' + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals3.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(100); + bcsd.setNumTrues(200); + bcsd.setNumNulls(300); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + Checker afterUpdate = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(110, bcsd.getNumFalses()); + Assert.assertEquals(220, bcsd.getNumTrues()); + Assert.assertEquals(330, bcsd.getNumNulls()); + } + }; + + store.updatePartitionColumnStatistics(cs, partVals3); + + store.backdoor().getStatsCache().setRunInvalidatorEvery(100); + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + afterUpdate.checkStats(aggrStats); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(6, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate is still in the cache. 
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(7, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Drop 'yesterday', so our first aggregate should be dumped from memory and hbase + store.dropPartition(dbName, tableName, partVals2); + + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(1, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(10, bcsd.getNumFalses()); + Assert.assertEquals(20, bcsd.getNumTrues()); + Assert.assertEquals(30, bcsd.getNumNulls()); + } + }.checkStats(aggrStats); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(8, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate is still in the cache. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + afterUpdate.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(9, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); + } finally { + store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); + store.backdoor().getStatsCache().setMaxTimeInCache(500000); + store.backdoor().getStatsCache().wakeInvalidator(); + } + } + + @Test + public void alterInvalidation() throws Exception { + try { + String dbName = "default"; + String tableName = "ai"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + List partVals3 = Arrays.asList("tomorrow"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + Partition[] partitions = new Partition[3]; + int partnum = 0; + for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + 
partitions[partnum++] = part; + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=tomorrow"), Arrays.asList("col1")); + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // wake the invalidator and check again to make sure it isn't too aggressive about + // removing our stuff. + store.backdoor().getStatsCache().wakeInvalidator(); + + Partition newPart = new Partition(partitions[2]); + newPart.setLastAccessTime((int)System.currentTimeMillis()); + store.alterPartition(dbName, tableName, partVals3, newPart); + + store.backdoor().getStatsCache().setRunInvalidatorEvery(100); + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate is still in the cache. 
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + } finally { + store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); + store.backdoor().getStatsCache().setMaxTimeInCache(500000); + store.backdoor().getStatsCache().wakeInvalidator(); + } + } + + @Test + public void altersInvalidation() throws Exception { + try { + String dbName = "default"; + String tableName = "asi"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + List partVals3 = Arrays.asList("tomorrow"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + Partition[] partitions = new Partition[3]; + int partnum = 0; + for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/invalidation/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + partitions[partnum++] = part; + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=tomorrow"), Arrays.asList("col1")); + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().misses.getCnt()); + + // wake the invalidator and check again to make sure it isn't too aggressive about + // removing our stuff. 
+ store.backdoor().getStatsCache().wakeInvalidator(); + + Partition[] newParts = new Partition[2]; + newParts[0] = new Partition(partitions[0]); + newParts[0].setLastAccessTime((int)System.currentTimeMillis()); + newParts[1] = new Partition(partitions[2]); + newParts[1].setLastAccessTime((int) System.currentTimeMillis()); + store.alterPartitions(dbName, tableName, Arrays.asList(partVals1, partVals3), + Arrays.asList(newParts)); + + store.backdoor().getStatsCache().setRunInvalidatorEvery(100); + store.backdoor().getStatsCache().wakeInvalidator(); + + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=tomorrow", "ds=today"), Arrays.asList("col1")); + + // Check that we missed, which means this aggregate was dropped from the cache. + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(3, store.backdoor().getStatsCache().misses.getCnt()); + + // Check that our other aggregate got dropped too + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(4, store.backdoor().getStatsCache().misses.getCnt()); + } finally { + store.backdoor().getStatsCache().setRunInvalidatorEvery(5000); + store.backdoor().getStatsCache().setMaxTimeInCache(500000); + store.backdoor().getStatsCache().wakeInvalidator(); + } + } +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java new file mode 100644 index 0000000..2d2bd46 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -0,0 +1,650 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.TestObjectStore; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Test that import from an RDBMS based metastore works + */ +public class TestHBaseImport extends HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(TestHBaseImport.class.getName()); + + private static final String[] tableNames = new String[] {"allnonparttable", "allparttable"}; + private static final String[] partVals = new String[] {"na", "emea", "latam", "apac"}; + private static final String[] funcNames = new String[] {"allfunc1", "allfunc2"}; + + private static final List masterKeySeqs = new ArrayList(); + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + TestObjectStore.dropAllStoreObjects(rdbms); + } + + @AfterClass + public static void shutdown() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + TestObjectStore.dropAllStoreObjects(rdbms); + for (int seq : masterKeySeqs) { + rdbms.removeMasterKey(seq); + } + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException { + setupConnection(); + setupHBaseStore(); + } + + @Test + public void importAll() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"alldb1", "alldb2"}; + String[] roles = new String[] {"allrole1", "allrole2"}; + String[] tokenIds = new String[] {"alltokenid1", "alltokenid2"}; + String[] tokens = new String[] {"alltoken1", "alltoken2"}; + String[] masterKeys = new String[] {"allmk1", "allmk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 
0 : store.getAllDatabases().size(); + + HBaseImport importer = new HBaseImport("-a"); + importer.setConnections(rdbms, store); + importer.run(); + + for (int i = 0; i < roles.length; i++) { + Role role = store.getRole(roles[i]); + Assert.assertNotNull(role); + Assert.assertEquals(roles[i], role.getRoleName()); + } + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles + 2, store.listRoleNames().size()); + + for (int i = 0; i < dbNames.length; i++) { + Database db = store.getDatabase(dbNames[i]); + Assert.assertNotNull(db); + // check one random value in the db rather than every value + Assert.assertEquals("file:/tmp", db.getLocationUri()); + + Table table = store.getTable(db.getName(), tableNames[0]); + Assert.assertNotNull(table); + Assert.assertEquals(now, table.getLastAccessTime()); + Assert.assertEquals("input", table.getSd().getInputFormat()); + + table = store.getTable(db.getName(), tableNames[1]); + Assert.assertNotNull(table); + + for (int j = 0; j < partVals.length; j++) { + Partition part = store.getPartition(dbNames[i], tableNames[1], Arrays.asList(partVals[j])); + Assert.assertNotNull(part); + Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); + } + + Assert.assertEquals(4, store.getPartitions(dbNames[i], tableNames[1], -1).size()); + Assert.assertEquals(2, store.getAllTables(dbNames[i]).size()); + + Assert.assertEquals(2, store.getFunctions(dbNames[i], "*").size()); + for (int j = 0; j < funcNames.length; j++) { + Assert.assertNotNull(store.getFunction(dbNames[i], funcNames[j])); + } + } + + Assert.assertEquals(baseNumDbs + 2, store.getAllDatabases().size()); + + // I can't test total number of tokens or master keys because the import grabs all and copies + // them, which means it grabs the ones imported by importSecurity test (if it's already run). + // Depending on it already running would make the tests order dependent, which junit doesn't + // guarantee. + for (int i = 0; i < tokenIds.length; i++) { + Assert.assertEquals(tokens[i], store.getToken(tokenIds[i])); + } + String[] hbaseKeys = store.getMasterKeys(); + Set keys = new HashSet<>(Arrays.asList(hbaseKeys)); + for (int i = 0; i < masterKeys.length; i++) { + Assert.assertTrue(keys.contains(masterKeys[i])); + } + } + + @Test + public void importOneDb() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"onedbdb1", "onedbdb2"}; + String[] roles = new String[] {"onedbrole1", "onedbrole2"}; + String[] tokenIds = new String[] {"onedbtokenid1", "onedbtokenid2"}; + String[] tokens = new String[] {"onedbtoken1", "onedbtoken2"}; + String[] masterKeys = new String[] {"onedbmk1", "onedbmk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : + store.getAllTokenIdentifiers().size(); + int baseNumKeys = store.getMasterKeys() == null ? 
0 : store.getMasterKeys().length; + + HBaseImport importer = new HBaseImport("-d", dbNames[0]); + importer.setConnections(rdbms, store); + importer.run(); + + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); + + Database db = store.getDatabase(dbNames[0]); + Assert.assertNotNull(db); + // check one random value in the db rather than every value + Assert.assertEquals("file:/tmp", db.getLocationUri()); + + Table table = store.getTable(db.getName(), tableNames[0]); + Assert.assertNotNull(table); + Assert.assertEquals(now, table.getLastAccessTime()); + Assert.assertEquals("input", table.getSd().getInputFormat()); + + table = store.getTable(db.getName(), tableNames[1]); + Assert.assertNotNull(table); + + for (int j = 0; j < partVals.length; j++) { + Partition part = store.getPartition(dbNames[0], tableNames[1], Arrays.asList(partVals[j])); + Assert.assertNotNull(part); + Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); + } + + Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size()); + Assert.assertEquals(2, store.getAllTables(dbNames[0]).size()); + + Assert.assertEquals(2, store.getFunctions(dbNames[0], "*").size()); + for (int j = 0; j < funcNames.length; j++) { + Assert.assertNotNull(store.getFunction(dbNames[0], funcNames[j])); + } + + Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); + + Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); + String[] hbaseKeys = store.getMasterKeys(); + Assert.assertEquals(baseNumKeys, hbaseKeys.length); + + // Have to do this last as it will throw an exception + thrown.expect(NoSuchObjectException.class); + store.getDatabase(dbNames[1]); + } + + @Test + public void importOneFunc() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"onefuncdb1", "onefuncdb2"}; + String[] roles = new String[] {"onefuncrole1", "onefuncrole2"}; + String[] tokenIds = new String[] {"onefunctokenid1", "onefunctokenid2"}; + String[] tokens = new String[] {"onefunctoken1", "onefunctoken2"}; + String[] masterKeys = new String[] {"onefuncmk1", "onefuncmk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : + store.getAllTokenIdentifiers().size(); + int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; + + // Create the database so I can put the function in it. + store.createDatabase( + new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); + + HBaseImport importer = new HBaseImport("-f", dbNames[0] + "." 
+ funcNames[0]); + importer.setConnections(rdbms, store); + importer.run(); + + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); + + Database db = store.getDatabase(dbNames[0]); + Assert.assertNotNull(db); + + Assert.assertEquals(0, store.getAllTables(dbNames[0]).size()); + Assert.assertEquals(1, store.getFunctions(dbNames[0], "*").size()); + Assert.assertNotNull(store.getFunction(dbNames[0], funcNames[0])); + Assert.assertNull(store.getFunction(dbNames[0], funcNames[1])); + + Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); + + Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); + String[] hbaseKeys = store.getMasterKeys(); + Assert.assertEquals(baseNumKeys, hbaseKeys.length); + } + + @Test + public void importOneTableNonPartitioned() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"onetabdb1", "onetabdb2"}; + String[] roles = new String[] {"onetabrole1", "onetabrole2"}; + String[] tokenIds = new String[] {"onetabtokenid1", "onetabtokenid2"}; + String[] tokens = new String[] {"onetabtoken1", "onetabtoken2"}; + String[] masterKeys = new String[] {"onetabmk1", "onetabmk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : + store.getAllTokenIdentifiers().size(); + int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; + + // Create the database so I can put the table in it. + store.createDatabase( + new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); + + HBaseImport importer = new HBaseImport("-t", dbNames[0] + "." + tableNames[0]); + importer.setConnections(rdbms, store); + importer.run(); + + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); + + Database db = store.getDatabase(dbNames[0]); + Assert.assertNotNull(db); + + Table table = store.getTable(db.getName(), tableNames[0]); + Assert.assertNotNull(table); + Assert.assertEquals(1, store.getAllTables(db.getName()).size()); + Assert.assertNull(store.getTable(db.getName(), tableNames[1])); + + Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); + Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); + + Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); + String[] hbaseKeys = store.getMasterKeys(); + Assert.assertEquals(baseNumKeys, hbaseKeys.length); + } + + @Test + public void importOneTablePartitioned() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"onetabpartdb1", "onetabpartodb2"}; + String[] roles = new String[] {"onetabpartorole1", "onetabpartorole2"}; + String[] tokenIds = new String[] {"onetabpartotokenid1", "onetabpartotokenid2"}; + String[] tokens = new String[] {"onetabpartotoken1", "onetabpartotoken2"}; + String[] masterKeys = new String[] {"onetabpartomk1", "onetabpartomk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 
0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : + store.getAllTokenIdentifiers().size(); + int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; + + // Create the database so I can put the table in it. + store.createDatabase( + new Database(dbNames[0], "no description", "file:/tmp", emptyParameters)); + + HBaseImport importer = new HBaseImport("-t", dbNames[0] + "." + tableNames[1]); + importer.setConnections(rdbms, store); + importer.run(); + + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); + + Database db = store.getDatabase(dbNames[0]); + Assert.assertNotNull(db); + + Table table = store.getTable(db.getName(), tableNames[1]); + Assert.assertNotNull(table); + Assert.assertEquals(1, store.getAllTables(db.getName()).size()); + + for (int j = 0; j < partVals.length; j++) { + Partition part = store.getPartition(dbNames[0], tableNames[1], Arrays.asList(partVals[j])); + Assert.assertNotNull(part); + Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation()); + } + Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size()); + + Assert.assertNull(store.getTable(db.getName(), tableNames[0])); + + Assert.assertEquals(0, store.getFunctions(dbNames[0], "*").size()); + Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size()); + + Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); + String[] hbaseKeys = store.getMasterKeys(); + Assert.assertEquals(baseNumKeys, hbaseKeys.length); + } + + @Test + public void importSecurity() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"securitydb1", "securitydb2"}; + String[] roles = new String[] {"securityrole1", "securityrole2"}; + String[] tokenIds = new String[] {"securitytokenid1", "securitytokenid2"}; + String[] tokens = new String[] {"securitytoken1", "securitytoken2"}; + String[] masterKeys = new String[] {"securitymk1", "securitymk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + + HBaseImport importer = new HBaseImport("-k"); + importer.setConnections(rdbms, store); + importer.run(); + + Assert.assertEquals(baseNumRoles, store.listRoleNames().size()); + + Assert.assertEquals(baseNumDbs, store.getAllDatabases().size()); + + // I can't test total number of tokens or master keys because the import grabs all and copies + // them, which means it grabs the ones imported by importAll test (if it's already run). + // Depending on it already running would make the tests order dependent, which junit doesn't + // guarantee. 
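+    // Instead, just verify that the specific tokens and master keys created by this test
+    // made it across the import.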
+ for (int i = 0; i < tokenIds.length; i++) { + Assert.assertEquals(tokens[i], store.getToken(tokenIds[i])); + } + String[] hbaseKeys = store.getMasterKeys(); + Set keys = new HashSet<>(Arrays.asList(hbaseKeys)); + for (int i = 0; i < masterKeys.length; i++) { + Assert.assertTrue(keys.contains(masterKeys[i])); + } + } + + // TODO test for bogus function name + // TODO test for bogus table name + // TODO test for non-existent items + + @Test + public void importOneRole() throws Exception { + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"oneroledb1", "oneroledb2"}; + String[] roles = new String[] {"onerolerole1", "onerolerole2"}; + String[] tokenIds = new String[] {"oneroletokenid1", "oneroletokenid2"}; + String[] tokens = new String[] {"oneroletoken1", "oneroletoken2"}; + String[] masterKeys = new String[] {"onerolemk1", "onerolemk2"}; + int now = (int)System.currentTimeMillis() / 1000; + + setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now); + + int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size(); + int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size(); + int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : + store.getAllTokenIdentifiers().size(); + int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length; + + HBaseImport importer = new HBaseImport("-r", roles[0]); + importer.setConnections(rdbms, store); + importer.run(); + + Role role = store.getRole(roles[0]); + Assert.assertNotNull(role); + Assert.assertEquals(roles[0], role.getRoleName()); + + // Make sure there aren't any extra roles + Assert.assertEquals(baseNumRoles + 1, store.listRoleNames().size()); + Assert.assertEquals(baseNumDbs, store.getAllDatabases().size()); + + Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size()); + String[] hbaseKeys = store.getMasterKeys(); + Assert.assertEquals(baseNumKeys, hbaseKeys.length); + + // Have to do this last as it will throw an exception + thrown.expect(NoSuchObjectException.class); + store.getRole(roles[1]); + } + + private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, + String[] tokenIds, String[] tokens, String[] masterKeys, int now) + throws MetaException, InvalidObjectException, NoSuchObjectException { + for (int i = 0; i < roles.length; i++) { + rdbms.addRole(roles[i], "me"); + } + + for (int i = 0; i < dbNames.length; i++) { + rdbms.createDatabase( + new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + rdbms.createTable(new Table(tableNames[0], dbNames[i], "me", now, now, 0, sd, null, + emptyParameters, null, null, null)); + + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("region", "string", "")); + rdbms.createTable(new Table(tableNames[1], dbNames[i], "me", now, now, 0, sd, partCols, + emptyParameters, null, null, null)); + + for (int j = 0; j < partVals.length; j++) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/region=" + partVals[j]); + Partition part = new Partition(Arrays.asList(partVals[j]), dbNames[i], tableNames[1], + now, now, psd, emptyParameters); + rdbms.addPartition(part); + } + + 
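+      // Also register the test functions in each database so the import tests can check
+      // for them afterwards.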
for (String funcName : funcNames) { + LOG.debug("Creating new function " + dbNames[i] + "." + funcName); + rdbms.createFunction(new Function(funcName, dbNames[i], "classname", "ownername", + PrincipalType.USER, (int) System.currentTimeMillis() / 1000, FunctionType.JAVA, + Arrays.asList(new ResourceUri(ResourceType.JAR, "uri")))); + } + } + for (int i = 0; i < tokenIds.length; i++) rdbms.addToken(tokenIds[i], tokens[i]); + for (int i = 0; i < masterKeys.length; i++) { + masterKeySeqs.add(rdbms.addMasterKey(masterKeys[i])); + } + } + + @Test + public void parallel() throws Exception { + int parallelFactor = 10; + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"paralleldb1"}; + int now = (int)System.currentTimeMillis() / 1000; + + for (int i = 0; i < dbNames.length; i++) { + rdbms.createDatabase( + new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("region", "string", "")); + for (int j = 0; j < parallelFactor; j++) { + rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, + emptyParameters, null, null, null)); + for (int k = 0; k < parallelFactor; k++) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/region=" + k); + Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j, + now, now, psd, emptyParameters); + rdbms.addPartition(part); + } + } + } + + HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]); + importer.setConnections(rdbms, store); + importer.run(); + + for (int i = 0; i < dbNames.length; i++) { + Database db = store.getDatabase(dbNames[i]); + Assert.assertNotNull(db); + + for (int j = 0; j < parallelFactor; j++) { + Table table = store.getTable(db.getName(), "t" + j); + Assert.assertNotNull(table); + Assert.assertEquals(now, table.getLastAccessTime()); + Assert.assertEquals("input", table.getSd().getInputFormat()); + + for (int k = 0; k < parallelFactor; k++) { + Partition part = + store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k)); + Assert.assertNotNull(part); + Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation()); + } + + Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size()); + } + Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size()); + + } + } + + // Same as the test above except we create 9 of everything instead of 10. This is important + // because in using a batch size of 2 the previous test guarantees 10 /2 =5 , meaning we'll + // have 5 writes on the partition queue with exactly 2 entries. In this test we'll handle the + // case where the last entry in the queue has fewer partitions. 
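+  // A sketch of the batching arithmetic behind these two tests, given the batch size of 2
+  // passed via "-b" below:
+  //   parallelFactor = 10 -> 10 / 2 = 5 full batches of exactly 2 partitions each
+  //   parallelFactor =  9 ->  9 / 2 = 4 full batches plus a final batch holding only 1 partition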
+ @Test + public void parallelOdd() throws Exception { + int parallelFactor = 9; + RawStore rdbms; + rdbms = new ObjectStore(); + rdbms.setConf(conf); + + String[] dbNames = new String[] {"oddparalleldb1"}; + int now = (int)System.currentTimeMillis() / 1000; + + for (int i = 0; i < dbNames.length; i++) { + rdbms.createDatabase( + new Database(dbNames[i], "no description", "file:/tmp", emptyParameters)); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("region", "string", "")); + for (int j = 0; j < parallelFactor; j++) { + rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, + emptyParameters, null, null, null)); + for (int k = 0; k < parallelFactor; k++) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/region=" + k); + Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j, + now, now, psd, emptyParameters); + rdbms.addPartition(part); + } + } + } + + HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]); + importer.setConnections(rdbms, store); + importer.run(); + + for (int i = 0; i < dbNames.length; i++) { + Database db = store.getDatabase(dbNames[i]); + Assert.assertNotNull(db); + + for (int j = 0; j < parallelFactor; j++) { + Table table = store.getTable(db.getName(), "t" + j); + Assert.assertNotNull(table); + Assert.assertEquals(now, table.getLastAccessTime()); + Assert.assertEquals("input", table.getSd().getInputFormat()); + + for (int k = 0; k < parallelFactor; k++) { + Partition part = + store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k)); + Assert.assertNotNull(part); + Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation()); + } + + Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size()); + } + Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size()); + + } + } +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java new file mode 100644 index 0000000..c61ebb7 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java @@ -0,0 +1,223 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; + +/** + * Integration tests with HBase Mini-cluster using actual SQL + */ +public class TestHBaseMetastoreSql extends HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(TestHBaseStoreIntegration.class.getName()); + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + + } + + @AfterClass + public static void shutdown() throws Exception { + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void before() throws IOException { + setupConnection(); + setupDriver(); + } + + @Test + public void insertIntoTable() throws Exception { + driver.run("create table iit (c int)"); + CommandProcessorResponse rsp = driver.run("insert into table iit values (3)"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void insertIntoPartitionTable() throws Exception { + driver.run("create table iipt (c int) partitioned by (ds string)"); + CommandProcessorResponse rsp = + driver.run("insert into table iipt partition(ds) values (1, 'today'), (2, 'yesterday')," + + "(3, 'tomorrow')"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void database() throws Exception { + CommandProcessorResponse rsp = driver.run("create database db"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("set role admin"); + Assert.assertEquals(0, rsp.getResponseCode()); + // security doesn't let me change the properties + rsp = driver.run("alter database db set dbproperties ('key' = 'value')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("drop database db"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void table() throws Exception { + driver.run("create table tbl (c int)"); + CommandProcessorResponse rsp = driver.run("insert into table tbl values (3)"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("select * from tbl"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table tbl set tblproperties ('example', 'true')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("drop table tbl"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void partitionedTable() throws Exception { + driver.run("create table parttbl (c int) partitioned by (ds string)"); + CommandProcessorResponse rsp = + driver.run("insert into table parttbl partition(ds) values (1, 'today'), (2, 'yesterday')" + + ", (3, 'tomorrow')"); + Assert.assertEquals(0, rsp.getResponseCode()); + // Do it again, to check insert into existing partitions + rsp = driver.run("insert into table parttbl partition(ds) values (4, 'today'), (5, 'yesterday')" + + ", (6, 'tomorrow')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("insert into table parttbl partition(ds = 'someday') values (1)"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("insert into table parttbl partition(ds = 'someday') values (2)"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table parttbl add partition (ds = 'whenever')"); + Assert.assertEquals(0, rsp.getResponseCode()); + 
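+    // Insert into the partition that was just added with ALTER TABLE ... ADD PARTITION.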
rsp = driver.run("insert into table parttbl partition(ds = 'whenever') values (2)"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table parttbl touch partition (ds = 'whenever')"); + Assert.assertEquals(0, rsp.getResponseCode()); + // TODO - Can't do this until getPartitionsByExpr implemented + /* + rsp = driver.run("alter table parttbl drop partition (ds = 'whenever')"); + Assert.assertEquals(0, rsp.getResponseCode()); + */ + rsp = driver.run("select * from parttbl"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("select * from parttbl where ds = 'today'"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void role() throws Exception { + CommandProcessorResponse rsp = driver.run("set role admin"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("create role role1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("grant role1 to user fred with admin option"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("create role role2"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("grant role1 to role role2"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("show principals role1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("show role grant role role1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("show role grant user " + System.getProperty("user.name")); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("show roles"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("revoke admin option for role1 from user fred"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("revoke role1 from user fred"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("revoke role1 from role role2"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("show current roles"); + Assert.assertEquals(0, rsp.getResponseCode()); + + rsp = driver.run("drop role role2"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("drop role role1"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void grant() throws Exception { + CommandProcessorResponse rsp = driver.run("set role admin"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("create role role3"); + Assert.assertEquals(0, rsp.getResponseCode()); + driver.run("create table granttbl (c int)"); + Assert.assertEquals(0, rsp.getResponseCode()); + driver.run("grant select on granttbl to " + System.getProperty("user.name")); + Assert.assertEquals(0, rsp.getResponseCode()); + driver.run("grant select on granttbl to role3 with grant option"); + Assert.assertEquals(0, rsp.getResponseCode()); + driver.run("revoke select on granttbl from " + System.getProperty("user.name")); + Assert.assertEquals(0, rsp.getResponseCode()); + driver.run("revoke grant option for select on granttbl from role3"); + Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void describeNonpartitionedTable() throws Exception { + CommandProcessorResponse rsp = driver.run("create table alter1(a int, b int)"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe extended alter1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table alter1 set serdeproperties('s1'='9')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe extended alter1"); + 
Assert.assertEquals(0, rsp.getResponseCode()); + } + + @Test + public void alterRenamePartitioned() throws Exception { + driver.run("create table alterrename (c int) partitioned by (ds string)"); + driver.run("alter table alterrename add partition (ds = 'a')"); + CommandProcessorResponse rsp = driver.run("describe extended alterrename partition (ds='a')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table alterrename rename to alter_renamed"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe extended alter_renamed partition (ds='a')"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe extended alterrename partition (ds='a')"); + Assert.assertEquals(10001, rsp.getResponseCode()); + } + + @Test + public void alterRename() throws Exception { + driver.run("create table alterrename1 (c int)"); + CommandProcessorResponse rsp = driver.run("describe alterrename1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("alter table alterrename1 rename to alter_renamed1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe alter_renamed1"); + Assert.assertEquals(0, rsp.getResponseCode()); + rsp = driver.run("describe alterrename1"); + Assert.assertEquals(10001, rsp.getResponseCode()); + } + + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java new file mode 100644 index 0000000..8b0b431 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java @@ -0,0 +1,1794 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +/** + * Integration tests with HBase Mini-cluster for HBaseStore + */ +public class TestHBaseStoreIntegration extends HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(TestHBaseStoreIntegration.class.getName()); + + @Rule public ExpectedException thrown = ExpectedException.none(); + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + } + + @AfterClass + public static void shutdown() throws Exception { + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException { + setupConnection(); + setupHBaseStore(); + } + + @Test + public void createDb() throws Exception { + String dbname = "mydb"; + Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + + Database d = store.getDatabase("mydb"); + Assert.assertEquals(dbname, d.getName()); + 
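+    // The remaining fields should round-trip through the HBase-backed store unchanged.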
Assert.assertEquals("no description", d.getDescription()); + Assert.assertEquals("file:///tmp", d.getLocationUri()); + } + + @Test + public void dropDb() throws Exception { + String dbname = "anotherdb"; + Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + + Database d = store.getDatabase(dbname); + Assert.assertNotNull(d); + + store.dropDatabase(dbname); + thrown.expect(NoSuchObjectException.class); + store.getDatabase(dbname); + } + + @Test + public void getAllDbs() throws Exception { + String[] dbNames = new String[3]; + for (int i = 0; i < dbNames.length; i++) { + dbNames[i] = "db" + i; + Database db = new Database(dbNames[i], "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + } + + List dbs = store.getAllDatabases(); + Assert.assertEquals(3, dbs.size()); + String[] namesFromStore = dbs.toArray(new String[3]); + Arrays.sort(namesFromStore); + Assert.assertArrayEquals(dbNames, namesFromStore); + } + + @Test + public void getDbsRegex() throws Exception { + String[] dbNames = new String[3]; + for (int i = 0; i < dbNames.length; i++) { + dbNames[i] = "db" + i; + Database db = new Database(dbNames[i], "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + } + + List dbs = store.getDatabases("db1|db2"); + Assert.assertEquals(2, dbs.size()); + String[] namesFromStore = dbs.toArray(new String[2]); + Arrays.sort(namesFromStore); + Assert.assertArrayEquals(Arrays.copyOfRange(dbNames, 1, 3), namesFromStore); + + dbs = store.getDatabases("db*"); + Assert.assertEquals(3, dbs.size()); + namesFromStore = dbs.toArray(new String[3]); + Arrays.sort(namesFromStore); + Assert.assertArrayEquals(dbNames, namesFromStore); + } + + @Test + public void getFuncsRegex() throws Exception { + String dbname = "default"; + int now = (int)(System.currentTimeMillis()/1000); + String[] funcNames = new String[3]; + for (int i = 0; i < funcNames.length; i++) { + funcNames[i] = "func" + i; + store.createFunction(new Function(funcNames[i], dbname, "o.a.h.h.myfunc", "me", + PrincipalType.USER, now, FunctionType.JAVA, + Arrays.asList(new ResourceUri(ResourceType.JAR, + "file:/tmp/somewhere")))); + } + + List funcs = store.getFunctions(dbname, "func1|func2"); + Assert.assertEquals(2, funcs.size()); + String[] namesFromStore = funcs.toArray(new String[2]); + Arrays.sort(namesFromStore); + Assert.assertArrayEquals(Arrays.copyOfRange(funcNames, 1, 3), namesFromStore); + + funcs = store.getFunctions(dbname, "func*"); + Assert.assertEquals(3, funcs.size()); + namesFromStore = funcs.toArray(new String[3]); + Arrays.sort(namesFromStore); + Assert.assertArrayEquals(funcNames, namesFromStore); + + funcs = store.getFunctions("nosuchdb", "func*"); + Assert.assertEquals(0, funcs.size()); + } + + @Test + public void createTable() throws Exception { + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", "mytable"); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + 
Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals("mytable", t.getTableName()); + } + + @Test + public void alterTable() throws Exception { + String tableName = "alttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + startTime += 10; + table.setLastAccessTime(startTime); + LOG.debug("XXX alter table test"); + store.alterTable("default", tableName, table); + + Table t = store.getTable("default", tableName); + LOG.debug("Alter table time " + t.getLastAccessTime()); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(startTime, t.getLastAccessTime()); + } + + @Test + public void getAllTables() throws Exception { + String dbNames[] = new String[]{"db0", "db1"}; // named to match getAllDbs so we get the + // right number of databases in that test. 
+ String tableNames[] = new String[]{"curly", "larry", "moe"}; + + for (int i = 0; i < dbNames.length; i++) { + store.createDatabase(new Database(dbNames[i], "no description", "file:///tmp", + emptyParameters)); + } + + for (int i = 0; i < dbNames.length; i++) { + for (int j = 0; j < tableNames.length; j++) { + int startTime = (int) (System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, + 0, + serde, null, null, emptyParameters); + Table table = new Table(tableNames[j], dbNames[i], "me", startTime, startTime, 0, sd, + null, + emptyParameters, null, null, null); + store.createTable(table); + } + } + + List fetchedNames = store.getAllTables(dbNames[0]); + Assert.assertEquals(3, fetchedNames.size()); + String[] sortedFetchedNames = fetchedNames.toArray(new String[fetchedNames.size()]); + Arrays.sort(sortedFetchedNames); + Assert.assertArrayEquals(tableNames, sortedFetchedNames); + + List regexNames = store.getTables(dbNames[0], "*y"); + Assert.assertEquals(2, regexNames.size()); + String[] sortedRegexNames = regexNames.toArray(new String[regexNames.size()]); + Arrays.sort(sortedRegexNames); + Assert.assertArrayEquals(Arrays.copyOfRange(tableNames, 0, 2), sortedRegexNames); + + List fetchedTables = store.getTableObjectsByName(dbNames[1], + Arrays.asList(Arrays.copyOfRange(tableNames, 1, 3))); + Assert.assertEquals(2, fetchedTables.size()); + sortedFetchedNames = new String[fetchedTables.size()]; + for (int i = 0; i < fetchedTables.size(); i++) { + sortedFetchedNames[i] = fetchedTables.get(i).getTableName(); + } + Arrays.sort(sortedFetchedNames); + Assert.assertArrayEquals(Arrays.copyOfRange(tableNames, 1, 3), sortedFetchedNames); + } + + @Test + public void dropTable() throws Exception { + String tableName = "dtable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertNotNull(t); + + store.dropTable("default", tableName); + Assert.assertNull(store.getTable("default", tableName)); + } + + @Test + public void createPartition() throws Exception { + String dbName = "default"; + String tableName = "myparttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = new ArrayList(); + vals.add("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, 
dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(dbName, tableName, vals); + Assert.assertEquals(1, p.getSd().getColsSize()); + Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); + Assert.assertEquals("input", p.getSd().getInputFormat()); + Assert.assertEquals("output", p.getSd().getOutputFormat()); + Assert.assertEquals(dbName, p.getDbName()); + Assert.assertEquals(tableName, p.getTableName()); + Assert.assertEquals(1, p.getValuesSize()); + Assert.assertEquals("fred", p.getValues().get(0)); + } + + @Test + public void addPartitions() throws Exception { + String dbName = "default"; + String tableName = "addParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + List partitions = new ArrayList(); + for (String val : partVals) { + List vals = new ArrayList(); + vals.add(val); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + partitions.add(part); + } + store.addPartitions(dbName, tableName, partitions); + + List partNames = store.listPartitionNames(dbName, tableName, (short) -1); + Assert.assertEquals(5, partNames.size()); + String[] names = partNames.toArray(new String[partNames.size()]); + Arrays.sort(names); + String[] canonicalNames = partVals.toArray(new String[partVals.size()]); + for (int i = 0; i < canonicalNames.length; i++) canonicalNames[i] = "pc=" + canonicalNames[i]; + Assert.assertArrayEquals(canonicalNames, names); + } + + @Test + public void alterPartitions() throws Exception { + String dbName = "default"; + String tableName = "alterParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + List partitions = new ArrayList(); + List> allVals = new ArrayList>(); + for (String val : partVals) { + List vals = new ArrayList(); + allVals.add(vals); + vals.add(val); + 
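+      // Give each partition a copy of the table's storage descriptor with its own location.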
StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + partitions.add(part); + } + store.addPartitions(dbName, tableName, partitions); + + for (Partition p : partitions) p.setLastAccessTime(startTime + 10); + store.alterPartitions(dbName, tableName, allVals, partitions); + + partitions = store.getPartitions(dbName, tableName, -1); + for (Partition part : partitions) { + Assert.assertEquals(startTime + 10, part.getLastAccessTime()); + } + } + + @Test + public void getPartitions() throws Exception { + String dbName = "default"; + String tableName = "manyParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + for (String val : partVals) { + List vals = new ArrayList(); + vals.add(val); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(dbName, tableName, vals); + Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); + } + + List parts = store.getPartitions(dbName, tableName, -1); + Assert.assertEquals(5, parts.size()); + String[] pv = new String[5]; + for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); + Arrays.sort(pv); + Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); + } + + @Test + public void listPartitions() throws Exception { + String dbName = "default"; + String tableName = "listParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + partCols.add(new FieldSchema("region", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; + for (String[] pv : partVals) { + List vals = new ArrayList(); + for (String v : pv) vals.add(v); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + } + + List names = store.listPartitionNames(dbName, tableName, (short) -1); + Assert.assertEquals(2, names.size()); + String[] resultNames = names.toArray(new String[names.size()]); + Arrays.sort(resultNames); + 
Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", + "pc=tomorrow/region=europe"}); + + List parts = store.getPartitionsByNames(dbName, tableName, names); + Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); + Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); + + store.dropPartitions(dbName, tableName, names); + List afterDropParts = store.getPartitions(dbName, tableName, -1); + Assert.assertEquals(0, afterDropParts.size()); + } + + @Test + public void listPartitionsWithPs() throws Exception { + String dbName = "default"; + String tableName = "listPartitionsWithPs"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("ds", "string", "")); + partCols.add(new FieldSchema("region", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + String[][] partVals = new String[][]{{"today", "north america"}, {"today", "europe"}, + {"tomorrow", "north america"}, {"tomorrow", "europe"}}; + for (String[] pv : partVals) { + List vals = new ArrayList(); + for (String v : pv) vals.add(v); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/ds=" + pv[0] + "/region=" + pv[1]); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + } + + // We only test listPartitionNamesPs since it calls listPartitionsPsWithAuth anyway. 
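+    // Expected matches for the partial specifications exercised below, given the four
+    // partitions created above:
+    //   (today, north america) -> exactly one name
+    //   (today)                -> ds=today/region=europe and ds=today/region=north america
+    //   (today, *)             -> the same two names
+    //   (*, europe)            -> ds=today/region=europe and ds=tomorrow/region=europe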
+ // Test the case where we completely specify the partition + List partitionNames = + store.listPartitionNamesPs(dbName, tableName, Arrays.asList(partVals[0]), (short) -1); + Assert.assertEquals(1, partitionNames.size()); + Assert.assertEquals("ds=today/region=north america", partitionNames.get(0)); + + // Leave off the last value of the partition + partitionNames = + store.listPartitionNamesPs(dbName, tableName, Arrays.asList(partVals[0][0]), (short)-1); + Assert.assertEquals(2, partitionNames.size()); + String[] names = partitionNames.toArray(new String[partitionNames.size()]); + Arrays.sort(names); + Assert.assertArrayEquals(new String[] {"ds=today/region=europe", + "ds=today/region=north america"}, names); + + // Put a star in the last value of the partition + partitionNames = + store.listPartitionNamesPs(dbName, tableName, Arrays.asList("today", "*"), (short)-1); + Assert.assertEquals(2, partitionNames.size()); + names = partitionNames.toArray(new String[partitionNames.size()]); + Arrays.sort(names); + Assert.assertArrayEquals(new String[] {"ds=today/region=europe", + "ds=today/region=north america"}, names); + + // Put a star in the first value of the partition + partitionNames = + store.listPartitionNamesPs(dbName, tableName, Arrays.asList("*", "europe"), (short)-1); + Assert.assertEquals(2, partitionNames.size()); + names = partitionNames.toArray(new String[partitionNames.size()]); + Arrays.sort(names); + Assert.assertArrayEquals(new String[] {"ds=today/region=europe", + "ds=tomorrow/region=europe"}, names); + } + + + @Test + public void getPartitionsByFilter() throws Exception { + String dbName = "default"; + String tableName = "getPartitionsByFilter"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("ds", "string", "")); + partCols.add(new FieldSchema("region", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + String[][] partVals = new String[][]{{"20010101", "north america"}, {"20010101", "europe"}, + {"20010102", "north america"}, {"20010102", "europe"}, {"20010103", "north america"}}; + for (String[] pv : partVals) { + List vals = new ArrayList(); + for (String v : pv) vals.add(v); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/ds=" + pv[0] + "/region=" + pv[1]); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + } + + // We only test getPartitionsByFilter since it calls same code as getPartitionsByExpr anyway. 
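+    // Note: as the comments on the individual cases below point out, filter pushdown is
+    // currently only implemented for the first partition column (ds), so predicates on
+    // region do not narrow the result set.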
+ // Test the case where we completely specify the partition + List parts = null; + parts = store.getPartitionsByFilter(dbName, tableName, "ds > '20010101'", (short) -1); + checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", + "[20010103, north america]"); + + parts = store.getPartitionsByFilter(dbName, tableName, "ds >= '20010102'", (short) -1); + checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", + "[20010103, north america]"); + + parts = store.getPartitionsByFilter(dbName, tableName, + "ds >= '20010102' and region = 'europe' ", (short) -1); + // filtering on first partition is only implemented as of now, so it will + // not filter on region + checkPartVals(parts, "[20010102, north america]", "[20010102, europe]", + "[20010103, north america]"); + + parts = store.getPartitionsByFilter(dbName, tableName, + "ds >= '20010101' and ds < '20010102'", (short) -1); + checkPartVals(parts,"[20010101, north america]", "[20010101, europe]"); + + parts = store.getPartitionsByFilter(dbName, tableName, + "ds = '20010102' or ds < '20010103'", (short) -1); + checkPartVals(parts, "[20010101, north america]", "[20010101, europe]", + "[20010102, north america]", "[20010102, europe]"); + + // test conversion to DNF + parts = store.getPartitionsByFilter(dbName, tableName, + "ds = '20010102' and (ds = '20010102' or region = 'europe')", (short) -1); + // filtering on first partition is only implemented as of now, so it will not filter on region + checkPartVals(parts, "[20010102, north america]", "[20010102, europe]"); + + parts = store.getPartitionsByFilter(dbName, tableName, + "region = 'europe'", (short) -1); + // filtering on first partition is only implemented as of now, so it will not filter on region + checkPartVals(parts, "[20010101, north america]", "[20010101, europe]", + "[20010102, north america]", "[20010102, europe]", "[20010103, north america]"); + + } + + /** + * Check if the given partitions have same values as given partitions value strings + * @param parts given partitions + * @param expectedPartVals + */ + private void checkPartVals(List parts, String ... 
expectedPartVals) { + Assert.assertEquals("number of partitions", expectedPartVals.length, parts.size()); + Set partValStrings = new TreeSet(); + for(Partition part : parts) { + partValStrings.add(part.getValues().toString()); + } + partValStrings.equals(new TreeSet(Arrays.asList(expectedPartVals))); + } + + @Test + public void dropPartition() throws Exception { + String dbName = "default"; + String tableName = "myparttable2"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Assert.assertNotNull(store.getPartition(dbName, tableName, vals)); + store.dropPartition(dbName, tableName, vals); + thrown.expect(NoSuchObjectException.class); + store.getPartition(dbName, tableName, vals); + } + + @Test + public void createRole() throws Exception { + int now = (int)System.currentTimeMillis()/1000; + String roleName = "myrole"; + store.addRole(roleName, "me"); + + Role r = store.getRole(roleName); + Assert.assertEquals(roleName, r.getRoleName()); + Assert.assertEquals("me", r.getOwnerName()); + Assert.assertTrue(now <= r.getCreateTime()); + } + + @Test + public void dropRole() throws Exception { + String roleName = "anotherrole"; + store.addRole(roleName, "me"); + + Role r = store.getRole(roleName); + Assert.assertEquals(roleName, r.getRoleName()); + + store.removeRole(roleName); + thrown.expect(NoSuchObjectException.class); + store.getRole(roleName); + } + + @Test + public void grantRevokeRoles() throws Exception { + int now = (int)(System.currentTimeMillis()/1000); + String roleName1 = "role1"; + store.addRole(roleName1, "me"); + String roleName2 = "role2"; + store.addRole(roleName2, "me"); + + Role role1 = store.getRole(roleName1); + Role role2 = store.getRole(roleName2); + + store.grantRole(role1, "fred", PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role2, roleName1, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role2, "fred", PrincipalType.USER, "admin", PrincipalType.ROLE, false); + + List roles = store.listRoles("fred", PrincipalType.USER); + Assert.assertEquals(3, roles.size()); + boolean sawRole1 = false, sawRole2 = false, sawPublic = false; + for (Role role : roles) { + if (role.getRoleName().equals(roleName1)) { + sawRole1 = true; + } else if (role.getRoleName().equals(roleName2)) { + sawRole2 = true; + } else if (role.getRoleName().equals(HiveMetaStore.PUBLIC)) { + sawPublic = true; + } else { + Assert.fail("Unknown role name " + role.getRoleName()); + } + } + Assert.assertTrue(sawRole1 && sawRole2 && sawPublic); + + roles = store.listRoles("fred", PrincipalType.ROLE); + Assert.assertEquals(0, roles.size()); + + roles = store.listRoles(roleName1, PrincipalType.ROLE); + Assert.assertEquals(1, roles.size()); + Role role = roles.get(0); + 
Assert.assertEquals(roleName2, role.getRoleName()); + + // Test listing all members in a role + List grants = store.listRoleMembers(roleName1); + Assert.assertEquals(1, grants.size()); + Assert.assertEquals("fred", grants.get(0).getPrincipalName()); + Assert.assertEquals(PrincipalType.USER, grants.get(0).getPrincipalType()); + Assert.assertTrue("Expected grant time of " + now + " got " + grants.get(0).getGrantTime(), + grants.get(0).getGrantTime() >= now); + Assert.assertEquals("bob", grants.get(0).getGrantorName()); + Assert.assertEquals(PrincipalType.USER, grants.get(0).getGrantorPrincipalType()); + Assert.assertFalse(grants.get(0).isGrantOption()); + + grants = store.listRoleMembers(roleName2); + Assert.assertEquals(2, grants.size()); + boolean sawFred = false; + sawRole1 = false; + for (RolePrincipalGrant m : grants) { + if ("fred".equals(m.getPrincipalName())) sawFred = true; + else if (roleName1.equals(m.getPrincipalName())) sawRole1 = true; + else Assert.fail("Unexpected principal " + m.getPrincipalName()); + } + Assert.assertTrue(sawFred && sawRole1); + + // Revoke a role with grant option, make sure it just goes to no grant option + store.revokeRole(role2, roleName1, PrincipalType.ROLE, true); + roles = store.listRoles(roleName1, PrincipalType.ROLE); + Assert.assertEquals(1, roles.size()); + Assert.assertEquals(roleName2, roles.get(0).getRoleName()); + + grants = store.listRoleMembers(roleName1); + Assert.assertFalse(grants.get(0).isGrantOption()); + + // Drop a role, make sure it is properly removed from the map + store.removeRole(roleName1); + roles = store.listRoles("fred", PrincipalType.USER); + Assert.assertEquals(2, roles.size()); + sawRole2 = sawPublic = false; + for (Role m : roles) { + if (m.getRoleName().equals(roleName2)) sawRole2 = true; + else if (m.getRoleName().equals(HiveMetaStore.PUBLIC)) sawPublic = true; + else Assert.fail("Unknown role " + m.getRoleName()); + } + Assert.assertTrue(sawRole2 && sawPublic); + roles = store.listRoles(roleName1, PrincipalType.ROLE); + Assert.assertEquals(0, roles.size()); + + // Revoke a role without grant option, make sure it goes away + store.revokeRole(role2, "fred", PrincipalType.USER, false); + roles = store.listRoles("fred", PrincipalType.USER); + Assert.assertEquals(1, roles.size()); + Assert.assertEquals(HiveMetaStore.PUBLIC, roles.get(0).getRoleName()); + } + + @Test + public void userToRoleMap() throws Exception { + String roleName1 = "utrm1"; + store.addRole(roleName1, "me"); + String roleName2 = "utrm2"; + store.addRole(roleName2, "me"); + String user1 = "wilma"; + String user2 = "betty"; + + Role role1 = store.getRole(roleName1); + Role role2 = store.getRole(roleName2); + + store.grantRole(role1, user1, PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role1, roleName2, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + + List roles = HBaseReadWrite.getInstance().getUserRoles(user1); + Assert.assertEquals(2, roles.size()); + String[] roleNames = roles.toArray(new String[roles.size()]); + Arrays.sort(roleNames); + Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); + + store.grantRole(role2, user1, PrincipalType.USER, "admin", PrincipalType.ROLE, false); + store.grantRole(role1, user2, PrincipalType.USER, "bob", PrincipalType.USER, false); + + roles = HBaseReadWrite.getInstance(conf).getUserRoles(user2); + Assert.assertEquals(2, roles.size()); + roleNames = roles.toArray(new String[roles.size()]); + Arrays.sort(roleNames); + Assert.assertArrayEquals(new 
String[]{roleName1, roleName2}, roleNames); + + store.revokeRole(role1, roleName2, PrincipalType.ROLE, false); + + // user1 should still have both roles since she was granted into role1 specifically. user2 + // should only have role2 now since role2 was revoked from role1. + roles = HBaseReadWrite.getInstance(conf).getUserRoles(user1); + Assert.assertEquals(2, roles.size()); + roleNames = roles.toArray(new String[roles.size()]); + Arrays.sort(roleNames); + Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); + + roles = HBaseReadWrite.getInstance(conf).getUserRoles(user2); + Assert.assertEquals(1, roles.size()); + Assert.assertEquals(roleName1, roles.get(0)); + } + + @Test + public void userToRoleMapOnDrop() throws Exception { + String roleName1 = "utrmod1"; + store.addRole(roleName1, "me"); + String roleName2 = "utrmod2"; + store.addRole(roleName2, "me"); + String user1 = "pebbles"; + String user2 = "bam-bam"; + + Role role1 = store.getRole(roleName1); + Role role2 = store.getRole(roleName2); + + store.grantRole(role1, user1, PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role1, roleName2, PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role1, user2, PrincipalType.USER, "bob", PrincipalType.USER, false); + + List roles = HBaseReadWrite.getInstance().getUserRoles(user2); + Assert.assertEquals(2, roles.size()); + String[] roleNames = roles.toArray(new String[roles.size()]); + Arrays.sort(roleNames); + Assert.assertArrayEquals(new String[]{roleName1, roleName2}, roleNames); + + store.removeRole(roleName2); + + roles = HBaseReadWrite.getInstance(conf).getUserRoles(user1); + Assert.assertEquals(1, roles.size()); + Assert.assertEquals(roleName1, roles.get(0)); + + roles = HBaseReadWrite.getInstance(conf).getUserRoles(user2); + Assert.assertEquals(1, roles.size()); + Assert.assertEquals(roleName1, roles.get(0)); + } + + @Test + public void grantRevokeGlobalPrivileges() throws Exception { + doGrantRevoke(HiveObjectType.GLOBAL, null, null, new String[] {"grpg1", "grpg2"}, + new String[] {"bugs", "elmer", "daphy", "wiley"}); + } + + @Test + public void grantRevokeDbPrivileges() throws Exception { + String dbName = "grdbp_db"; + try { + Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + doGrantRevoke(HiveObjectType.DATABASE, dbName, null, + new String[] {"grdbp_role1", "grdbp_role2"}, + new String[] {"fred", "barney", "wilma", "betty"}); + } finally { + store.dropDatabase(dbName); + } + } + + @Test + public void grantRevokeTablePrivileges() throws Exception { + String dbName = "grtp_db"; + String tableName = "grtp_table"; + try { + Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + doGrantRevoke(HiveObjectType.TABLE, dbName, tableName, + new String[] {"grtp_role1", "grtp_role2"}, + new String[] {"batman", "robin", "superman", "wonderwoman"}); + + } finally { + if (store.getTable(dbName, tableName) != null) 
store.dropTable(dbName, tableName); + store.dropDatabase(dbName); + } + } + + private void doGrantRevoke(HiveObjectType objectType, String dbName, String tableName, + String[] roleNames, String[] userNames) + throws Exception { + store.addRole(roleNames[0], "me"); + store.addRole(roleNames[1], "me"); + int now = (int)(System.currentTimeMillis() / 1000); + + Role role1 = store.getRole(roleNames[0]); + Role role2 = store.getRole(roleNames[1]); + store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); + + List privileges = new ArrayList(); + HiveObjectRef hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + PrivilegeGrantInfo grantInfo = + new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); + HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, + grantInfo); + privileges.add(hop); + + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("exec", now, "me", PrincipalType.USER, false); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[1], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("create", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); + privileges.add(hop); + + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("create2", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); + privileges.add(hop); + + PrivilegeBag pBag = new PrivilegeBag(privileges); + store.grantPrivileges(pBag); + + PrincipalPrivilegeSet pps = getPPS(objectType, dbName, tableName, userNames[0]); + + Assert.assertEquals(1, pps.getUserPrivilegesSize()); + Assert.assertEquals(1, pps.getUserPrivileges().get(userNames[0]).size()); + grantInfo = pps.getUserPrivileges().get(userNames[0]).get(0); + Assert.assertEquals("read", grantInfo.getPrivilege()); + Assert.assertTrue(now <= grantInfo.getCreateTime()); + Assert.assertEquals("me", grantInfo.getGrantor()); + Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); + Assert.assertFalse(grantInfo.isGrantOption()); + + Assert.assertEquals(2, pps.getRolePrivilegesSize()); + Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); + grantInfo = pps.getRolePrivileges().get(roleNames[0]).get(0); + Assert.assertEquals("write", grantInfo.getPrivilege()); + Assert.assertTrue(now <= grantInfo.getCreateTime()); + Assert.assertEquals("me", grantInfo.getGrantor()); + Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); + Assert.assertTrue(grantInfo.isGrantOption()); + + Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[1]).size()); + grantInfo = pps.getRolePrivileges().get(roleNames[1]).get(0); + Assert.assertEquals("exec", grantInfo.getPrivilege()); + 
Assert.assertTrue(now <= grantInfo.getCreateTime()); + Assert.assertEquals("me", grantInfo.getGrantor()); + Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); + Assert.assertFalse(grantInfo.isGrantOption()); + + pps = getPPS(objectType, dbName, tableName, userNames[1]); + + Assert.assertEquals(0, pps.getUserPrivilegesSize()); + + Assert.assertEquals(1, pps.getRolePrivilegesSize()); + Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[1]).size()); + grantInfo = pps.getRolePrivileges().get(roleNames[1]).get(0); + Assert.assertEquals("exec", grantInfo.getPrivilege()); + Assert.assertTrue(now <= grantInfo.getCreateTime()); + Assert.assertEquals("me", grantInfo.getGrantor()); + Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); + Assert.assertFalse(grantInfo.isGrantOption()); + + pps = getPPS(objectType, dbName, tableName, userNames[2]); + + Assert.assertEquals(1, pps.getUserPrivilegesSize()); + Assert.assertEquals(2, pps.getUserPrivileges().get(userNames[2]).size()); + Assert.assertEquals(0, pps.getRolePrivilegesSize()); + + pps = getPPS(objectType, dbName, tableName, userNames[3]); + Assert.assertEquals(0, pps.getUserPrivilegesSize()); + Assert.assertEquals(0, pps.getRolePrivilegesSize()); + + // Test that removing role removes the role grants + store.removeRole(roleNames[1]); + checkRoleRemovedFromAllPrivileges(objectType, dbName, tableName, roleNames[1]); + pps = getPPS(objectType, dbName, tableName, userNames[0]); + + Assert.assertEquals(1, pps.getRolePrivilegesSize()); + Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); + + pps = getPPS(objectType, dbName, tableName, userNames[1]); + + Assert.assertEquals(0, pps.getRolePrivilegesSize()); + + // Test that revoking with grant option = true just removes grant option + privileges.clear(); + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + hiveObjRef = new HiveObjectRef(objectType, dbName, tableName, null, null); + grantInfo = new PrivilegeGrantInfo("create2", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, userNames[2], PrincipalType.USER, grantInfo); + privileges.add(hop); + + pBag = new PrivilegeBag(privileges); + store.revokePrivileges(pBag, true); + pps = getPPS(objectType, dbName, tableName, userNames[0]); + + Assert.assertEquals(1, pps.getRolePrivilegesSize()); + Assert.assertEquals(1, pps.getRolePrivileges().get(roleNames[0]).size()); + grantInfo = pps.getRolePrivileges().get(roleNames[0]).get(0); + Assert.assertEquals("write", grantInfo.getPrivilege()); + Assert.assertTrue(now <= grantInfo.getCreateTime()); + Assert.assertEquals("me", grantInfo.getGrantor()); + Assert.assertEquals(PrincipalType.USER, grantInfo.getGrantorType()); + Assert.assertFalse(grantInfo.isGrantOption()); + + pps = getPPS(objectType, dbName, tableName, userNames[2]); + + Assert.assertEquals(1, pps.getUserPrivilegesSize()); + Assert.assertEquals(2, pps.getUserPrivileges().get(userNames[2]).size()); + for (PrivilegeGrantInfo pgi : pps.getUserPrivileges().get(userNames[2])) { + if (pgi.getPrivilege().equals("create")) Assert.assertTrue(pgi.isGrantOption()); + else if (pgi.getPrivilege().equals("create2")) Assert.assertFalse(pgi.isGrantOption()); + else Assert.fail("huh?"); + } + + // Test revoking revokes + store.revokePrivileges(pBag, 
false); + + pps = getPPS(objectType, dbName, tableName, userNames[0]); + + Assert.assertEquals(1, pps.getUserPrivilegesSize()); + Assert.assertEquals(1, pps.getRolePrivilegesSize()); + Assert.assertEquals(0, pps.getRolePrivileges().get(roleNames[0]).size()); + + pps = getPPS(objectType, dbName, tableName, userNames[2]); + Assert.assertEquals(1, pps.getUserPrivilegesSize()); + Assert.assertEquals(1, pps.getUserPrivileges().get(userNames[2]).size()); + Assert.assertEquals("create", pps.getUserPrivileges().get(userNames[2]).get(0).getPrivilege()); + Assert.assertEquals(0, pps.getRolePrivilegesSize()); + } + + private PrincipalPrivilegeSet getPPS(HiveObjectType objectType, String dbName, String tableName, + String userName) + throws InvalidObjectException, MetaException { + switch (objectType) { + case GLOBAL: return store.getUserPrivilegeSet(userName, null); + case DATABASE: return store.getDBPrivilegeSet(dbName, userName, null); + case TABLE: return store.getTablePrivilegeSet(dbName, tableName, userName, null); + default: throw new RuntimeException("huh?"); + } + } + + private void checkRoleRemovedFromAllPrivileges(HiveObjectType objectType, String dbName, + String tableName, String roleName) + throws IOException, NoSuchObjectException, MetaException { + List pgi = null; + switch (objectType) { + case GLOBAL: + pgi = HBaseReadWrite.getInstance().getGlobalPrivs().getRolePrivileges().get(roleName); + break; + + case DATABASE: + pgi = store.getDatabase(dbName).getPrivileges().getRolePrivileges().get(roleName); + break; + + case TABLE: + pgi = store.getTable(dbName, tableName).getPrivileges().getRolePrivileges().get(roleName); + break; + + default: + Assert.fail(); + } + + Assert.assertNull("Expected null for role " + roleName + " for type " + objectType.toString() + + " with db " + dbName + " and table " + tableName, pgi); + } + + @Test + public void listDbGrants() throws Exception { + String dbNames[] = new String[] {"ldbg_db1", "ldbg_db2"}; + try { + Database db = new Database(dbNames[0], "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + db = new Database(dbNames[1], "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + String[] roleNames = new String[]{"ldbg_role1", "ldbg_role2"}; + String[] userNames = new String[]{"frodo", "sam"}; + + store.addRole(roleNames[0], "me"); + store.addRole(roleNames[1], "me"); + int now = (int)(System.currentTimeMillis() / 1000); + + Role role1 = store.getRole(roleNames[0]); + Role role2 = store.getRole(roleNames[1]); + store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); + + List privileges = new ArrayList(); + HiveObjectRef hiveObjRef = + new HiveObjectRef(HiveObjectType.DATABASE, dbNames[0], null, null, null); + PrivilegeGrantInfo grantInfo = + new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); + HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, + grantInfo); + privileges.add(hop); + + grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + PrivilegeBag pBag = new PrivilegeBag(privileges); + store.grantPrivileges(pBag); + + List hops = + 
store.listPrincipalDBGrants(roleNames[0], PrincipalType.ROLE, dbNames[0]); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalDBGrants(userNames[0], PrincipalType.USER, dbNames[0]); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalDBGrants(roleNames[1], PrincipalType.ROLE, dbNames[0]); + Assert.assertEquals(0, hops.size()); + hops = store.listPrincipalDBGrants(userNames[1], PrincipalType.USER, dbNames[0]); + Assert.assertEquals(0, hops.size()); + + hops = store.listPrincipalDBGrants(roleNames[0], PrincipalType.ROLE, dbNames[1]); + Assert.assertEquals(0, hops.size()); + hops = store.listPrincipalDBGrants(userNames[0], PrincipalType.USER, dbNames[1]); + Assert.assertEquals(0, hops.size()); + + hops = store.listDBGrantsAll(dbNames[0]); + Assert.assertEquals(2, hops.size()); + boolean sawUser = false, sawRole = false; + for (HiveObjectPrivilege h : hops) { + if (h.getPrincipalName().equals(userNames[0])) { + Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, h.getHiveObject().getObjectType()); + Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); + sawUser = true; + } else if (h.getPrincipalName().equals(roleNames[0])) { + Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, h.getHiveObject().getObjectType()); + Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); + sawRole = true; + } + } + Assert.assertTrue(sawUser && sawRole); + + hops = store.listPrincipalDBGrantsAll(roleNames[0], PrincipalType.ROLE); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalDBGrantsAll(userNames[0], PrincipalType.USER); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.DATABASE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalDBGrantsAll(roleNames[1], PrincipalType.ROLE); + Assert.assertEquals(0, hops.size()); + hops = store.listPrincipalDBGrantsAll(userNames[1], PrincipalType.USER); + Assert.assertEquals(0, hops.size()); + + + } finally { + store.dropDatabase(dbNames[0]); + store.dropDatabase(dbNames[1]); + } + } + + @Test + public void listGlobalGrants() throws Exception { + String[] roleNames = new String[]{"lgg_role1", "lgg_role2"}; + String[] userNames = new String[]{"merry", "pippen"}; + + store.addRole(roleNames[0], "me"); + store.addRole(roleNames[1], "me"); + int now = (int)(System.currentTimeMillis() / 1000); + + Role role1 = store.getRole(roleNames[0]); + Role role2 = store.getRole(roleNames[1]); + store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); + 
store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); + + List privileges = new ArrayList(); + HiveObjectRef hiveObjRef = + new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); + PrivilegeGrantInfo grantInfo = + new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); + HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, + grantInfo); + privileges.add(hop); + + grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + PrivilegeBag pBag = new PrivilegeBag(privileges); + store.grantPrivileges(pBag); + + List hops = + store.listPrincipalGlobalGrants(roleNames[0], PrincipalType.ROLE); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.GLOBAL, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalGlobalGrants(userNames[0], PrincipalType.USER); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.GLOBAL, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalGlobalGrants(roleNames[1], PrincipalType.ROLE); + Assert.assertEquals(0, hops.size()); + hops = store.listPrincipalGlobalGrants(userNames[1], PrincipalType.USER); + Assert.assertEquals(0, hops.size()); + + hops = store.listGlobalGrantsAll(); + Assert.assertEquals(2, hops.size()); + boolean sawUser = false, sawRole = false; + for (HiveObjectPrivilege h : hops) { + if (h.getPrincipalName().equals(userNames[0])) { + Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.GLOBAL, h.getHiveObject().getObjectType()); + Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); + sawUser = true; + } else if (h.getPrincipalName().equals(roleNames[0])) { + Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.GLOBAL, h.getHiveObject().getObjectType()); + Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); + sawRole = true; + } + } + Assert.assertTrue(sawUser && sawRole); + } + + @Test + public void listTableGrants() throws Exception { + String dbName = "ltg_db"; + String[] tableNames = new String[] {"ltg_t1", "ltg_t2"}; + try { + Database db = new Database(dbName, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableNames[0], dbName, "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + table = new Table(tableNames[1], dbName, "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + String[] roleNames = new String[]{"ltg_role1", 
"ltg_role2"}; + String[] userNames = new String[]{"gandalf", "radagast"}; + + store.addRole(roleNames[0], "me"); + store.addRole(roleNames[1], "me"); + int now = (int)(System.currentTimeMillis() / 1000); + + Role role1 = store.getRole(roleNames[0]); + Role role2 = store.getRole(roleNames[1]); + store.grantRole(role1, userNames[0], PrincipalType.USER, "bob", PrincipalType.USER, false); + store.grantRole(role1, roleNames[1], PrincipalType.ROLE, "admin", PrincipalType.ROLE, true); + store.grantRole(role2, userNames[1], PrincipalType.USER, "bob", PrincipalType.USER, false); + + List privileges = new ArrayList(); + HiveObjectRef hiveObjRef = + new HiveObjectRef(HiveObjectType.TABLE, dbName, tableNames[0], null, null); + PrivilegeGrantInfo grantInfo = + new PrivilegeGrantInfo("read", now, "me", PrincipalType.USER, false); + HiveObjectPrivilege hop = new HiveObjectPrivilege(hiveObjRef, userNames[0], PrincipalType.USER, + grantInfo); + privileges.add(hop); + + grantInfo = new PrivilegeGrantInfo("write", now, "me", PrincipalType.USER, true); + hop = new HiveObjectPrivilege(hiveObjRef, roleNames[0], PrincipalType.ROLE, grantInfo); + privileges.add(hop); + + PrivilegeBag pBag = new PrivilegeBag(privileges); + store.grantPrivileges(pBag); + + List hops = + store.listAllTableGrants(roleNames[0], PrincipalType.ROLE, dbName, tableNames[0]); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listAllTableGrants(userNames[0], PrincipalType.USER, dbName, tableNames[0]); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listAllTableGrants(roleNames[1], PrincipalType.ROLE, dbName, tableNames[0]); + Assert.assertEquals(0, hops.size()); + hops = store.listAllTableGrants(userNames[1], PrincipalType.USER, dbName, tableNames[0]); + Assert.assertEquals(0, hops.size()); + + hops = store.listAllTableGrants(roleNames[0], PrincipalType.ROLE, dbName, tableNames[1]); + Assert.assertEquals(0, hops.size()); + hops = store.listAllTableGrants(userNames[0], PrincipalType.USER, dbName, tableNames[1]); + Assert.assertEquals(0, hops.size()); + + hops = store.listTableGrantsAll(dbName, tableNames[0]); + Assert.assertEquals(2, hops.size()); + boolean sawUser = false, sawRole = false; + for (HiveObjectPrivilege h : hops) { + if (h.getPrincipalName().equals(userNames[0])) { + Assert.assertEquals(PrincipalType.USER, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.TABLE, h.getHiveObject().getObjectType()); + Assert.assertEquals("read", h.getGrantInfo().getPrivilege()); + sawUser = true; + } else if (h.getPrincipalName().equals(roleNames[0])) { + Assert.assertEquals(PrincipalType.ROLE, h.getPrincipalType()); + Assert.assertEquals(HiveObjectType.TABLE, h.getHiveObject().getObjectType()); + Assert.assertEquals("write", h.getGrantInfo().getPrivilege()); + sawRole = true; + } + } + Assert.assertTrue(sawUser && sawRole); + + hops = store.listPrincipalTableGrantsAll(roleNames[0], PrincipalType.ROLE); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.ROLE, hops.get(0).getPrincipalType()); + 
Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("write", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalTableGrantsAll(userNames[0], PrincipalType.USER); + Assert.assertEquals(1, hops.size()); + Assert.assertEquals(PrincipalType.USER, hops.get(0).getPrincipalType()); + Assert.assertEquals(HiveObjectType.TABLE, hops.get(0).getHiveObject().getObjectType()); + Assert.assertEquals("read", hops.get(0).getGrantInfo().getPrivilege()); + + hops = store.listPrincipalDBGrantsAll(roleNames[1], PrincipalType.ROLE); + Assert.assertEquals(0, hops.size()); + hops = store.listPrincipalDBGrantsAll(userNames[1], PrincipalType.USER); + Assert.assertEquals(0, hops.size()); + + + } finally { + store.dropTable(dbName, tableNames[0]); + store.dropTable(dbName, tableNames[1]); + store.dropDatabase(dbName); + } + } + + @Test + public void tableStatistics() throws Exception { + long now = System.currentTimeMillis(); + String dbname = "default"; + String tableName = "statstable"; + String boolcol = "boolcol"; + String longcol = "longcol"; + String doublecol = "doublecol"; + String stringcol = "stringcol"; + String binarycol = "bincol"; + String decimalcol = "deccol"; + long trues = 37; + long falses = 12; + long booleanNulls = 2; + long longHigh = 120938479124L; + long longLow = -12341243213412124L; + long longNulls = 23; + long longDVs = 213L; + double doubleHigh = 123423.23423; + double doubleLow = 0.00001234233; + long doubleNulls = 92; + long doubleDVs = 1234123421L; + long strMaxLen = 1234; + double strAvgLen = 32.3; + long strNulls = 987; + long strDVs = 906; + long binMaxLen = 123412987L; + double binAvgLen = 76.98; + long binNulls = 976998797L; + Decimal decHigh = new Decimal(); + decHigh.setScale((short)3); + decHigh.setUnscaled("3876".getBytes()); // I have no clue how this is translated, but it + // doesn't matter + Decimal decLow = new Decimal(); + decLow.setScale((short)3); + decLow.setUnscaled("38".getBytes()); + long decNulls = 13; + long decDVs = 923947293L; + + List cols = new ArrayList(); + cols.add(new FieldSchema(boolcol, "boolean", "nocomment")); + cols.add(new FieldSchema(longcol, "long", "nocomment")); + cols.add(new FieldSchema(doublecol, "double", "nocomment")); + cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment")); + cols.add(new FieldSchema(binarycol, "binary", "nocomment")); + cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + ColumnStatistics stats = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(now); + desc.setDbName(dbname); + desc.setTableName(tableName); + desc.setIsTblLevel(true); + stats.setStatsDesc(desc); + + // Do one column of each type + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName(boolcol); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(trues); + boolData.setNumFalses(falses); + boolData.setNumNulls(booleanNulls); + data.setBooleanStats(boolData); + obj.setStatsData(data); + 
stats.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName(longcol); + obj.setColType("long"); + data = new ColumnStatisticsData(); + LongColumnStatsData longData = new LongColumnStatsData(); + longData.setHighValue(longHigh); + longData.setLowValue(longLow); + longData.setNumNulls(longNulls); + longData.setNumDVs(longDVs); + data.setLongStats(longData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName(doublecol); + obj.setColType("double"); + data = new ColumnStatisticsData(); + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); + doubleData.setHighValue(doubleHigh); + doubleData.setLowValue(doubleLow); + doubleData.setNumNulls(doubleNulls); + doubleData.setNumDVs(doubleDVs); + data.setDoubleStats(doubleData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + store.updateTableColumnStatistics(stats); + + stats = store.getTableColumnStatistics(dbname, tableName, + Arrays.asList(boolcol, longcol, doublecol)); + + // We'll check all of the individual values later. + Assert.assertEquals(3, stats.getStatsObjSize()); + + // check that we can fetch just some of the columns + stats = store.getTableColumnStatistics(dbname, tableName, Arrays.asList(boolcol)); + Assert.assertEquals(1, stats.getStatsObjSize()); + + stats = new ColumnStatistics(); + stats.setStatsDesc(desc); + + + obj = new ColumnStatisticsObj(); + obj.setColName(stringcol); + obj.setColType("string"); + data = new ColumnStatisticsData(); + StringColumnStatsData strData = new StringColumnStatsData(); + strData.setMaxColLen(strMaxLen); + strData.setAvgColLen(strAvgLen); + strData.setNumNulls(strNulls); + strData.setNumDVs(strDVs); + data.setStringStats(strData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName(binarycol); + obj.setColType("binary"); + data = new ColumnStatisticsData(); + BinaryColumnStatsData binData = new BinaryColumnStatsData(); + binData.setMaxColLen(binMaxLen); + binData.setAvgColLen(binAvgLen); + binData.setNumNulls(binNulls); + data.setBinaryStats(binData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + obj = new ColumnStatisticsObj(); + obj.setColName(decimalcol); + obj.setColType("decimal(5,3)"); + data = new ColumnStatisticsData(); + DecimalColumnStatsData decData = new DecimalColumnStatsData(); + LOG.debug("Setting decimal high value to " + decHigh.getScale() + " <" + new String(decHigh.getUnscaled()) + ">"); + decData.setHighValue(decHigh); + decData.setLowValue(decLow); + decData.setNumNulls(decNulls); + decData.setNumDVs(decDVs); + data.setDecimalStats(decData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + store.updateTableColumnStatistics(stats); + + stats = store.getTableColumnStatistics(dbname, tableName, + Arrays.asList(boolcol, longcol, doublecol, stringcol, binarycol, decimalcol)); + Assert.assertEquals(now, stats.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(dbname, stats.getStatsDesc().getDbName()); + Assert.assertEquals(tableName, stats.getStatsDesc().getTableName()); + Assert.assertTrue(stats.getStatsDesc().isIsTblLevel()); + + Assert.assertEquals(6, stats.getStatsObjSize()); + + ColumnStatisticsData colData = stats.getStatsObj().get(0).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, colData.getSetField()); + boolData = colData.getBooleanStats(); + Assert.assertEquals(trues, boolData.getNumTrues()); + Assert.assertEquals(falses, boolData.getNumFalses()); + 
Assert.assertEquals(booleanNulls, boolData.getNumNulls()); + + colData = stats.getStatsObj().get(1).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, colData.getSetField()); + longData = colData.getLongStats(); + Assert.assertEquals(longHigh, longData.getHighValue()); + Assert.assertEquals(longLow, longData.getLowValue()); + Assert.assertEquals(longNulls, longData.getNumNulls()); + Assert.assertEquals(longDVs, longData.getNumDVs()); + + colData = stats.getStatsObj().get(2).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, colData.getSetField()); + doubleData = colData.getDoubleStats(); + Assert.assertEquals(doubleHigh, doubleData.getHighValue(), 0.01); + Assert.assertEquals(doubleLow, doubleData.getLowValue(), 0.01); + Assert.assertEquals(doubleNulls, doubleData.getNumNulls()); + Assert.assertEquals(doubleDVs, doubleData.getNumDVs()); + + colData = stats.getStatsObj().get(3).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, colData.getSetField()); + strData = colData.getStringStats(); + Assert.assertEquals(strMaxLen, strData.getMaxColLen()); + Assert.assertEquals(strAvgLen, strData.getAvgColLen(), 0.01); + Assert.assertEquals(strNulls, strData.getNumNulls()); + Assert.assertEquals(strDVs, strData.getNumDVs()); + + colData = stats.getStatsObj().get(4).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, colData.getSetField()); + binData = colData.getBinaryStats(); + Assert.assertEquals(binMaxLen, binData.getMaxColLen()); + Assert.assertEquals(binAvgLen, binData.getAvgColLen(), 0.01); + Assert.assertEquals(binNulls, binData.getNumNulls()); + + colData = stats.getStatsObj().get(5).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, colData.getSetField()); + decData = colData.getDecimalStats(); + Assert.assertEquals(decHigh, decData.getHighValue()); + Assert.assertEquals(decLow, decData.getLowValue()); + Assert.assertEquals(decNulls, decData.getNumNulls()); + Assert.assertEquals(decDVs, decData.getNumDVs()); + + } + + @Test + public void partitionStatistics() throws Exception { + long now = System.currentTimeMillis(); + String dbname = "default"; + String tableName = "statspart"; + String[] partNames = {"ds=today", "ds=yesterday"}; + String[] partVals = {"today", "yesterday"}; + String boolcol = "boolcol"; + String longcol = "longcol"; + String doublecol = "doublecol"; + String stringcol = "stringcol"; + String binarycol = "bincol"; + String decimalcol = "deccol"; + long trues = 37; + long falses = 12; + long booleanNulls = 2; + long strMaxLen = 1234; + double strAvgLen = 32.3; + long strNulls = 987; + long strDVs = 906; + + List cols = new ArrayList(); + cols.add(new FieldSchema(boolcol, "boolean", "nocomment")); + cols.add(new FieldSchema(longcol, "long", "nocomment")); + cols.add(new FieldSchema(doublecol, "double", "nocomment")); + cols.add(new FieldSchema(stringcol, "varchar(32)", "nocomment")); + cols.add(new FieldSchema(binarycol, "binary", "nocomment")); + cols.add(new FieldSchema(decimalcol, "decimal(5, 3)", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols, + emptyParameters, null, null, 
null); + store.createTable(table); + + for (int i = 0; i < partNames.length; i++) { + ColumnStatistics stats = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(now); + desc.setDbName(dbname); + desc.setTableName(tableName); + desc.setIsTblLevel(false); + desc.setPartName(partNames[i]); + stats.setStatsDesc(desc); + + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName(boolcol); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(trues); + boolData.setNumFalses(falses); + boolData.setNumNulls(booleanNulls); + data.setBooleanStats(boolData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i])); + } + + List statsList = store.getPartitionColumnStatistics(dbname, tableName, + Arrays.asList(partNames), Arrays.asList(boolcol)); + + Assert.assertEquals(2, statsList.size()); + for (int i = 0; i < partNames.length; i++) { + Assert.assertEquals(1, statsList.get(i).getStatsObjSize()); + } + + for (int i = 0; i < partNames.length; i++) { + ColumnStatistics stats = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(now); + desc.setDbName(dbname); + desc.setTableName(tableName); + desc.setIsTblLevel(false); + desc.setPartName(partNames[i]); + stats.setStatsDesc(desc); + + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName(stringcol); + obj.setColType("string"); + ColumnStatisticsData data = new ColumnStatisticsData(); + StringColumnStatsData strData = new StringColumnStatsData(); + strData.setMaxColLen(strMaxLen); + strData.setAvgColLen(strAvgLen); + strData.setNumNulls(strNulls); + strData.setNumDVs(strDVs); + data.setStringStats(strData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(stats, Arrays.asList(partVals[i])); + } + + // Make sure when we ask for one we only get one + statsList = store.getPartitionColumnStatistics(dbname, tableName, + Arrays.asList(partNames), Arrays.asList(boolcol)); + + Assert.assertEquals(2, statsList.size()); + for (int i = 0; i < partNames.length; i++) { + Assert.assertEquals(1, statsList.get(i).getStatsObjSize()); + } + + statsList = store.getPartitionColumnStatistics(dbname, tableName, + Arrays.asList(partNames), Arrays.asList(boolcol, stringcol)); + + Assert.assertEquals(2, statsList.size()); + for (int i = 0; i < partNames.length; i++) { + Assert.assertEquals(2, statsList.get(i).getStatsObjSize()); + // Just check one piece of the data, I don't need to check it all again + Assert.assertEquals(booleanNulls, + statsList.get(i).getStatsObj().get(0).getStatsData().getBooleanStats().getNumNulls()); + Assert.assertEquals(strDVs, + statsList.get(i).getStatsObj().get(1).getStatsData().getStringStats().getNumDVs()); + } + } + + @Test + public void delegationToken() throws Exception { + store.addToken("abc", "def"); + store.addToken("ghi", "jkl"); + + Assert.assertEquals("def", store.getToken("abc")); + Assert.assertEquals("jkl", store.getToken("ghi")); + Assert.assertNull(store.getToken("wabawaba")); + String[] allToks = store.getAllTokenIdentifiers().toArray(new String[2]); + Arrays.sort(allToks); + Assert.assertArrayEquals(new String[]{"abc", "ghi"}, allToks); + + store.removeToken("abc"); + store.removeToken("wabawaba"); + + Assert.assertNull(store.getToken("abc")); + 
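+    // Removing the unknown token "wabawaba" above should be a no-op; only the "ghi" token should remain.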
Assert.assertEquals("jkl", store.getToken("ghi")); + allToks = store.getAllTokenIdentifiers().toArray(new String[1]); + Assert.assertArrayEquals(new String[]{"ghi"}, allToks); + } + + @Test + public void masterKey() throws Exception { + Assert.assertEquals(0, store.addMasterKey("k1")); + Assert.assertEquals(1, store.addMasterKey("k2")); + + String[] keys = store.getMasterKeys(); + Arrays.sort(keys); + Assert.assertArrayEquals(new String[]{"k1", "k2"}, keys); + + store.updateMasterKey(0, "k3"); + keys = store.getMasterKeys(); + Arrays.sort(keys); + Assert.assertArrayEquals(new String[]{"k2", "k3"}, keys); + + store.removeMasterKey(1); + keys = store.getMasterKeys(); + Assert.assertArrayEquals(new String[]{"k3"}, keys); + + thrown.expect(NoSuchObjectException.class); + store.updateMasterKey(72, "whatever"); + } + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java new file mode 100644 index 0000000..decfa4a --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Integration tests with HBase Mini-cluster for HBaseStore + */ +public class TestStorageDescriptorSharing extends HBaseIntegrationTests { + + private static final Log LOG = LogFactory.getLog(TestHBaseStoreIntegration.class.getName()); + + private MessageDigest md; + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + } + + @AfterClass + public static void shutdown() throws Exception { + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException { + setupConnection(); + setupHBaseStore(); + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + + @Test + public void createManyPartitions() throws Exception { + String dbName = "default"; + String tableName = "manyParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + for (String val : partVals) { + List vals = new ArrayList(); + vals.add(val); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(dbName, tableName, vals); + Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); + } + + Assert.assertEquals(1, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + String tableName2 = "differentTable"; + sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0, + serde, null, null, emptyParameters); + table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + // Drop one of the partitions and make sure it doesn't drop the storage descriptor + store.dropPartition(dbName, tableName, Arrays.asList(partVals.get(0))); + Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + // Alter the second table in a few ways to make sure it changes it's descriptor 
properly
+    table = store.getTable(dbName, tableName2);
+    byte[] sdHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md);
+
+    // Alter the table without touching the storage descriptor
+    table.setLastAccessTime(startTime + 1);
+    store.alterTable(dbName, tableName2, table);
+    Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    table = store.getTable(dbName, tableName2);
+    byte[] alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md);
+    Assert.assertArrayEquals(sdHash, alteredHash);
+
+    // Alter the table, changing the storage descriptor
+    table.getSd().setOutputFormat("output_changed");
+    store.alterTable(dbName, tableName2, table);
+    Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    table = store.getTable(dbName, tableName2);
+    alteredHash = HBaseUtils.hashStorageDescriptor(table.getSd(), md);
+    Assert.assertFalse(Arrays.equals(sdHash, alteredHash));
+
+    // Alter one of the partitions without touching the storage descriptor
+    Partition part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1)));
+    sdHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md);
+    part.setLastAccessTime(part.getLastAccessTime() + 1);
+    store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part);
+    Assert.assertEquals(2, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1)));
+    alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md);
+    Assert.assertArrayEquals(sdHash, alteredHash);
+
+    // Alter the partition, changing the storage descriptor
+    part.getSd().setOutputFormat("output_changed_some_more");
+    store.alterPartition(dbName, tableName, Arrays.asList(partVals.get(1)), part);
+    Assert.assertEquals(3, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    part = store.getPartition(dbName, tableName, Arrays.asList(partVals.get(1)));
+    alteredHash = HBaseUtils.hashStorageDescriptor(part.getSd(), md);
+    Assert.assertFalse(Arrays.equals(sdHash, alteredHash));
+
+    // Alter multiple partitions without touching the storage descriptors
+    List<Partition> parts = store.getPartitions(dbName, tableName, -1);
+    sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md);
+    for (int i = 1; i < 3; i++) {
+      parts.get(i).setLastAccessTime(97);
+    }
+    List<List<String>> listPartVals = new ArrayList<List<String>>();
+    for (String pv : partVals.subList(1, partVals.size())) {
+      listPartVals.add(Arrays.asList(pv));
+    }
+    store.alterPartitions(dbName, tableName, listPartVals, parts);
+    Assert.assertEquals(3, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    parts = store.getPartitions(dbName, tableName, -1);
+    alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md);
+    Assert.assertArrayEquals(sdHash, alteredHash);
+
+    // Alter multiple partitions changing the storage descriptors
+    parts = store.getPartitions(dbName, tableName, -1);
+    sdHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md);
+    for (int i = 1; i < 3; i++) {
+      parts.get(i).getSd().setOutputFormat("yet_a_different_of");
+    }
+    store.alterPartitions(dbName, tableName, listPartVals, parts);
+    Assert.assertEquals(4, HBaseReadWrite.getInstance(conf).countStorageDescriptor());
+    parts = store.getPartitions(dbName, tableName, -1);
+    alteredHash = HBaseUtils.hashStorageDescriptor(parts.get(1).getSd(), md);
+    Assert.assertFalse(Arrays.equals(sdHash, alteredHash));
+
+    for (String partVal : partVals.subList(1, partVals.size())) {
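+      // Drop the remaining partitions (the first one was already dropped above), then both tables;
+      // once nothing references them, no shared storage descriptors should be left behind.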
store.dropPartition(dbName, tableName, Arrays.asList(partVal)); + } + store.dropTable(dbName, tableName); + store.dropTable(dbName, tableName2); + + Assert.assertEquals(0, HBaseReadWrite.getInstance(conf).countStorageDescriptor()); + + + } +} diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index 0588233..739d06a 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -80,6 +80,13 @@ org.apache.hive + hive-it-unit + ${project.version} + tests + test + + + org.apache.hive hive-it-util ${project.version} test @@ -509,7 +516,8 @@ logDirectory="${project.build.directory}/qfile-results/clientpositive/" hadoopVersion="${active.hadoop.version}" initScript="${initScript}" - cleanupScript="q_test_cleanup.sql"/> + cleanupScript="q_test_cleanup.sql" + useHBaseMetastore="true"/> hadoop-1 + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + + **/metastore/hbase/** + + + + + org.apache.hadoop @@ -120,6 +134,12 @@ org.apache.hbase + hbase-common + ${hbase.hadoop1.version} + tests + + + org.apache.hbase hbase-server ${hbase.hadoop1.version} @@ -170,6 +190,18 @@ org.apache.hbase hbase-server ${hbase.hadoop2.version} + test-jar + + + org.apache.hbase + hbase-common + ${hbase.hadoop2.version} + test-jar + + + org.apache.hbase + hbase-server + ${hbase.hadoop2.version} org.apache.hbase diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java new file mode 100644 index 0000000..1f42007 --- /dev/null +++ b/itests/util/src/main/java/org/apache/hadoop/hive/metastore/hbase/HBaseStoreTestUtil.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.List; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hive.conf.HiveConf; + +public class HBaseStoreTestUtil { + public static void initHBaseMetastore(HBaseAdmin admin, HiveConf conf) throws Exception { + for (String tableName : HBaseReadWrite.tableNames) { + List families = HBaseReadWrite.columnFamilies.get(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + for (byte[] family : families) { + HColumnDescriptor columnDesc = new HColumnDescriptor(family); + desc.addFamily(columnDesc); + } + admin.createTable(desc); + } + admin.close(); + if (conf != null) { + HBaseReadWrite.getInstance(conf); + } + } +} \ No newline at end of file diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 3fae0ba..16e73c9 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -62,9 +62,13 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hive.cli.CliDriver; import org.apache.hadoop.hive.cli.CliSessionState; @@ -100,6 +104,8 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.Shell; import org.apache.hive.common.util.StreamPrinter; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; import org.apache.tools.ant.BuildException; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher; @@ -157,10 +163,12 @@ private final String initScript; private final String cleanupScript; + private boolean useHBaseMetastore = false; public interface SuiteAddTestFunctor { public void addTestToSuite(TestSuite suite, Object setup, String tName); } + private HBaseTestingUtility utility; static { for (String srcTable : System.getProperty("test.src.tables", "").trim().split(",")) { @@ -341,15 +349,43 @@ private String getKeyProviderURI() { return "jceks://file" + new Path(keyDir, "test.jks").toUri(); } + private void startMiniHBaseCluster() throws Exception { + Configuration hbaseConf = HBaseConfiguration.create(); + hbaseConf.setInt("hbase.master.info.port", -1); + utility = new HBaseTestingUtility(hbaseConf); + utility.startMiniCluster(); + conf = new HiveConf(utility.getConfiguration(), Driver.class); + HBaseAdmin admin = utility.getHBaseAdmin(); + // Need to use reflection here to make compilation pass since HBaseIntegrationTests + // is not compiled in hadoop-1. 
All HBaseMetastore tests run under hadoop-2, so this + // guarantee HBaseIntegrationTests exist when we hitting this code path + java.lang.reflect.Method initHBaseMetastoreMethod = Class.forName( + "org.apache.hadoop.hive.metastore.hbase.HBaseStoreTestUtil") + .getMethod("initHBaseMetastore", HBaseAdmin.class, HiveConf.class); + initHBaseMetastoreMethod.invoke(null, admin, conf); + } + public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer, String initScript, String cleanupScript) throws Exception { + this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, false); + } + public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, + String confDir, String hadoopVer, String initScript, String cleanupScript, boolean useHBaseMetastore) + throws Exception { this.outDir = outDir; this.logDir = logDir; + this.useHBaseMetastore = useHBaseMetastore; + + Logger hadoopLog = Logger.getLogger("org.apache.hadoop"); + hadoopLog.setLevel(Level.INFO); if (confDir != null && !confDir.isEmpty()) { HiveConf.setHiveSiteLocation(new URL("file://"+ new File(confDir).toURI().getPath() + "/hive-site.xml")); System.out.println("Setting hive-site: "+HiveConf.getHiveSiteLocation()); } + if (useHBaseMetastore) { + startMiniHBaseCluster(); + } conf = new HiveConf(Driver.class); this.hadoopVer = getHadoopMainVersion(hadoopVer); qMap = new TreeMap(); @@ -438,6 +474,9 @@ public void shutdown() throws Exception { sparkSession = null; } } + if (useHBaseMetastore) { + utility.shutdownMiniCluster(); + } if (mr != null) { mr.shutdown(); mr = null; @@ -1571,7 +1610,7 @@ private static int executeDiffCommand(String inFileName, // close it first. SessionState ss = SessionState.get(); if (ss != null && ss.out != null && ss.out != System.out) { - ss.out.close(); + ss.out.close(); } String inSorted = inFileName + SORT_SUFFIX; diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index d651195..7026a0d 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -714,6 +714,53 @@ struct FireEventResponse { // NOP for now, this is just a place holder for future responses } +struct MetadataPpdResult { + 1: required binary metadata, + 2: required binary includeBitset +} + +// Return type for get_file_metadata_by_expr +struct GetFileMetadataByExprResult { + 1: required map metadata, + 2: required bool isSupported, + 3: required list unknownFileIds +} + +// Request type for get_file_metadata_by_expr +struct GetFileMetadataByExprRequest { + 1: required list fileIds, + 2: required binary expr +} + +// Return type for get_file_metadata +struct GetFileMetadataResult { + 1: required map metadata, + 2: required bool isSupported +} + +// Request type for get_file_metadata +struct GetFileMetadataRequest { + 1: required list fileIds +} + +// Return type for put_file_metadata +struct PutFileMetadataResult { +} + +// Request type for put_file_metadata +struct PutFileMetadataRequest { + 1: required list fileIds, + 2: required list metadata +} + +// Return type for clear_file_metadata +struct ClearFileMetadataResult { +} + +// Request type for clear_file_metadata +struct ClearFileMetadataRequest { + 1: required list fileIds +} struct GetAllFunctionsResponse { 1: optional list functions @@ -1194,6 +1241,13 @@ service ThriftHiveMetastore extends fb303.FacebookService NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst) CurrentNotificationEventId 
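As a reading aid for the QTestUtil changes above: when the new useHBaseMetastore flag is true, QTestUtil brings up an HBaseTestingUtility mini-cluster, creates the metastore tables through HBaseStoreTestUtil (via reflection, so the hadoop-1 build still compiles), and stops the cluster again in shutdown(). A minimal sketch of driving this mode directly follows; the paths, Hadoop version string, script names and MiniClusterType value are placeholders, not taken from the patch.

    // Hypothetical driver; all literal values below are illustrative only.
    import org.apache.hadoop.hive.ql.QTestUtil;
    import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;

    public class HBaseMetastoreQTestSketch {
      public static void main(String[] args) throws Exception {
        // The trailing 'true' enables the HBase-backed metastore: QTestUtil will
        // start an HBase mini-cluster and create the metastore tables before any query runs.
        QTestUtil qt = new QTestUtil("/tmp/qfile-out", "/tmp/qfile-log",
            MiniClusterType.none, null, "2.6.0",
            "q_test_init.sql", "q_test_cleanup.sql", true);
        try {
          // ... add and run .q files here, as a generated TestCliDriver would ...
        } finally {
          qt.shutdown();   // also shuts down the HBase mini-cluster in this mode
        }
      }
    }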
get_current_notificationEventId() FireEventResponse fire_listener_event(1:FireEventRequest rqst) + void flushCache() + + GetFileMetadataByExprResult get_file_metadata_by_expr(1:GetFileMetadataByExprRequest req) + GetFileMetadataResult get_file_metadata(1:GetFileMetadataRequest req) + PutFileMetadataResult put_file_metadata(1:PutFileMetadataRequest req) + ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req) + } // * Note about the DDL_TIME: When creating or altering a table or a partition, diff --git a/metastore/pom.xml b/metastore/pom.xml index a3e25e1..2ab5cd3 100644 --- a/metastore/pom.xml +++ b/metastore/pom.xml @@ -51,6 +51,11 @@ ${guava.version} + com.google.protobuf + protobuf-java + ${protobuf.version} + + com.jolbox bonecp ${bonecp.version} @@ -121,6 +126,27 @@ libthrift ${libthrift.version} + + co.cask.tephra + tephra-api + ${tephra.version} + + + co.cask.tephra + tephra-core + ${tephra.version} + + + org.ow2.asm + asm-all + + + + + co.cask.tephra + tephra-hbase-compat-1.0 + ${tephra.version} + junit @@ -139,6 +165,23 @@ hadoop-1 + + + + org.apache.maven.plugins + maven-compiler-plugin + 2.3.2 + + + **/hbase/** + + + **/hbase/** + + + + + org.apache.hadoop @@ -163,6 +206,11 @@ ${hadoop-23.version} true + + org.apache.hbase + hbase-client + ${hbase.hadoop2.version} + @@ -198,6 +246,39 @@ + + protobuf + + + + org.apache.maven.plugins + maven-antrun-plugin + + + generate-protobuf-sources + generate-sources + + + + + Building HBase Metastore Protobuf + + + + + + + + + + run + + + + + + + @@ -252,6 +333,7 @@ src/model src/gen/thrift/gen-javabean + src/gen/protobuf/gen-java diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java new file mode 100644 index 0000000..39a7278 --- /dev/null +++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -0,0 +1,34901 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
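The Thrift additions above expose the metastore's new file-metadata calls (get_file_metadata, get_file_metadata_by_expr, put_file_metadata, clear_file_metadata, plus flushCache). A rough client-side sketch follows; it assumes the generated classes land in org.apache.hadoop.hive.metastore.api like the existing metastore types, that fileIds is a list of i64 file ids (the element types are not visible in this excerpt), and that accessors follow the usual Thrift bean naming. Host, port and ids are placeholders.

    // Hypothetical raw Thrift client usage of the new calls; see assumptions above.
    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
    import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class FileMetadataClientSketch {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083);   // placeholder metastore endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        GetFileMetadataRequest req = new GetFileMetadataRequest();
        req.setFileIds(Arrays.asList(1L, 2L));                 // assumed i64 file ids
        GetFileMetadataResult res = client.get_file_metadata(req);
        System.out.println("file metadata supported: " + res.isIsSupported());

        transport.close();
      }
    }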
+// source: hbase_metastore_proto.proto + +package org.apache.hadoop.hive.metastore.hbase; + +public final class HbaseMetastoreProto { + private HbaseMetastoreProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.PrincipalType} + */ + public enum PrincipalType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * USER = 0; + */ + USER(0, 0), + /** + * ROLE = 1; + */ + ROLE(1, 1), + ; + + /** + * USER = 0; + */ + public static final int USER_VALUE = 0; + /** + * ROLE = 1; + */ + public static final int ROLE_VALUE = 1; + + + public final int getNumber() { return value; } + + public static PrincipalType valueOf(int value) { + switch (value) { + case 0: return USER; + case 1: return ROLE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public PrincipalType findValueByNumber(int number) { + return PrincipalType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.getDescriptor().getEnumTypes().get(0); + } + + private static final PrincipalType[] VALUES = values(); + + public static PrincipalType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private PrincipalType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalType) + } + + public interface AggrStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int64 parts_found = 1; + /** + * required int64 parts_found = 1; + */ + boolean hasPartsFound(); + /** + * required int64 parts_found = 1; + */ + long getPartsFound(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + java.util.List + getColStatsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + int getColStatsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + java.util.List + getColStatsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} + */ + public static final class AggrStats extends + 
com.google.protobuf.GeneratedMessage + implements AggrStatsOrBuilder { + // Use AggrStats.newBuilder() to construct. + private AggrStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStats defaultInstance; + public static AggrStats getDefaultInstance() { + return defaultInstance; + } + + public AggrStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + partsFound_ = input.readInt64(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + colStats_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = java.util.Collections.unmodifiableList(colStats_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggrStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int64 parts_found = 1; + public static final int PARTS_FOUND_FIELD_NUMBER = 1; 
+ private long partsFound_; + /** + * required int64 parts_found = 1; + */ + public boolean hasPartsFound() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 parts_found = 1; + */ + public long getPartsFound() { + return partsFound_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + public static final int COL_STATS_FIELD_NUMBER = 2; + private java.util.List colStats_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List getColStatsList() { + return colStats_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsOrBuilderList() { + return colStats_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public int getColStatsCount() { + return colStats_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { + return colStats_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index) { + return colStats_.get(index); + } + + private void initFields() { + partsFound_ = 0L; + colStats_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPartsFound()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getColStatsCount(); i++) { + if (!getColStats(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, partsFound_); + } + for (int i = 0; i < colStats_.size(); i++) { + output.writeMessage(2, colStats_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, partsFound_); + } + for (int i = 0; i < colStats_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, colStats_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getColStatsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + partsFound_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (colStatsBuilder_ == null) { + colStats_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + colStatsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.partsFound_ = partsFound_; + if (colStatsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = java.util.Collections.unmodifiableList(colStats_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.colStats_ = colStats_; + } else { + result.colStats_ = colStatsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance()) return this; + if (other.hasPartsFound()) { + setPartsFound(other.getPartsFound()); + } + if (colStatsBuilder_ == null) { + if (!other.colStats_.isEmpty()) { + if (colStats_.isEmpty()) { + colStats_ = other.colStats_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureColStatsIsMutable(); + colStats_.addAll(other.colStats_); + } + onChanged(); + } + } else { + if (!other.colStats_.isEmpty()) { + if (colStatsBuilder_.isEmpty()) { + colStatsBuilder_.dispose(); + 
colStatsBuilder_ = null; + colStats_ = other.colStats_; + bitField0_ = (bitField0_ & ~0x00000002); + colStatsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getColStatsFieldBuilder() : null; + } else { + colStatsBuilder_.addAllMessages(other.colStats_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPartsFound()) { + + return false; + } + for (int i = 0; i < getColStatsCount(); i++) { + if (!getColStats(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int64 parts_found = 1; + private long partsFound_ ; + /** + * required int64 parts_found = 1; + */ + public boolean hasPartsFound() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 parts_found = 1; + */ + public long getPartsFound() { + return partsFound_; + } + /** + * required int64 parts_found = 1; + */ + public Builder setPartsFound(long value) { + bitField0_ |= 0x00000001; + partsFound_ = value; + onChanged(); + return this; + } + /** + * required int64 parts_found = 1; + */ + public Builder clearPartsFound() { + bitField0_ = (bitField0_ & ~0x00000001); + partsFound_ = 0L; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + private java.util.List colStats_ = + java.util.Collections.emptyList(); + private void ensureColStatsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + colStats_ = new java.util.ArrayList(colStats_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> colStatsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List getColStatsList() { + if (colStatsBuilder_ == null) { + return java.util.Collections.unmodifiableList(colStats_); + } else { + return colStatsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public int getColStatsCount() { + if (colStatsBuilder_ == null) { + return colStats_.size(); + } else { + return colStatsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) { + if (colStatsBuilder_ == null) { + return colStats_.get(index); + } else { + return colStatsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder setColStats( + int 
index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.set(index, value); + onChanged(); + } else { + colStatsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder setColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.set(index, builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.add(value); + onChanged(); + } else { + colStatsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) { + if (colStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColStatsIsMutable(); + colStats_.add(index, value); + onChanged(); + } else { + colStatsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.add(builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addColStats( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.add(index, builderForValue.build()); + onChanged(); + } else { + colStatsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder addAllColStats( + java.lang.Iterable values) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + super.addAll(values, colStats_); + onChanged(); + } else { + colStatsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder clearColStats() { + if (colStatsBuilder_ == null) { + colStats_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + colStatsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public Builder removeColStats(int index) { + if (colStatsBuilder_ == null) { + ensureColStatsIsMutable(); + colStats_.remove(index); + onChanged(); + } else { + colStatsBuilder_.remove(index); + } + 
return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder getColStatsBuilder( + int index) { + return getColStatsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder( + int index) { + if (colStatsBuilder_ == null) { + return colStats_.get(index); } else { + return colStatsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsOrBuilderList() { + if (colStatsBuilder_ != null) { + return colStatsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(colStats_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder() { + return getColStatsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder( + int index) { + return getColStatsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2; + */ + public java.util.List + getColStatsBuilderList() { + return getColStatsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> + getColStatsFieldBuilder() { + if (colStatsBuilder_ == null) { + colStatsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>( + colStats_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + colStats_ = null; + } + return colStatsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) + } + + static { + defaultInstance = new AggrStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats) + } + + public interface AggrStatsBloomFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes db_name = 1; + /** + * required bytes db_name = 1; + */ + boolean hasDbName(); + /** + * required bytes db_name = 1; + */ + com.google.protobuf.ByteString getDbName(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter 
bloom_filter = 3; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + boolean hasBloomFilter(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder(); + + // required int64 aggregated_at = 4; + /** + * required int64 aggregated_at = 4; + */ + boolean hasAggregatedAt(); + /** + * required int64 aggregated_at = 4; + */ + long getAggregatedAt(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} + */ + public static final class AggrStatsBloomFilter extends + com.google.protobuf.GeneratedMessage + implements AggrStatsBloomFilterOrBuilder { + // Use AggrStatsBloomFilter.newBuilder() to construct. + private AggrStatsBloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStatsBloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStatsBloomFilter defaultInstance; + public static AggrStatsBloomFilter getDefaultInstance() { + return defaultInstance; + } + + public AggrStatsBloomFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStatsBloomFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + dbName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = bloomFilter_.toBuilder(); + } + bloomFilter_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(bloomFilter_); + bloomFilter_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 32: { + bitField0_ |= 0x00000008; + aggregatedAt_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + 
this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStatsBloomFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggrStatsBloomFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface BloomFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required int32 num_bits = 1; + /** + * required int32 num_bits = 1; + */ + boolean hasNumBits(); + /** + * required int32 num_bits = 1; + */ + int getNumBits(); + + // required int32 num_funcs = 2; + /** + * required int32 num_funcs = 2; + */ + boolean hasNumFuncs(); + /** + * required int32 num_funcs = 2; + */ + int getNumFuncs(); + + // repeated int64 bits = 3; + /** + * repeated int64 bits = 3; + */ + java.util.List getBitsList(); + /** + * repeated int64 bits = 3; + */ + int getBitsCount(); + /** + * repeated int64 bits = 3; + */ + long getBits(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} + */ + public static final class BloomFilter extends + com.google.protobuf.GeneratedMessage + implements BloomFilterOrBuilder { + // Use BloomFilter.newBuilder() to construct. 
+ private BloomFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BloomFilter defaultInstance; + public static BloomFilter getDefaultInstance() { + return defaultInstance; + } + + public BloomFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BloomFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + numBits_ = input.readInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + numFuncs_ = input.readInt32(); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + bits_.add(input.readInt64()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + bits_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + bits_.add(input.readInt64()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = java.util.Collections.unmodifiableList(bits_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BloomFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws com.google.protobuf.InvalidProtocolBufferException { + return new BloomFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required int32 num_bits = 1; + public static final int NUM_BITS_FIELD_NUMBER = 1; + private int numBits_; + /** + * required int32 num_bits = 1; + */ + public boolean hasNumBits() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 num_bits = 1; + */ + public int getNumBits() { + return numBits_; + } + + // required int32 num_funcs = 2; + public static final int NUM_FUNCS_FIELD_NUMBER = 2; + private int numFuncs_; + /** + * required int32 num_funcs = 2; + */ + public boolean hasNumFuncs() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 num_funcs = 2; + */ + public int getNumFuncs() { + return numFuncs_; + } + + // repeated int64 bits = 3; + public static final int BITS_FIELD_NUMBER = 3; + private java.util.List bits_; + /** + * repeated int64 bits = 3; + */ + public java.util.List + getBitsList() { + return bits_; + } + /** + * repeated int64 bits = 3; + */ + public int getBitsCount() { + return bits_.size(); + } + /** + * repeated int64 bits = 3; + */ + public long getBits(int index) { + return bits_.get(index); + } + + private void initFields() { + numBits_ = 0; + numFuncs_ = 0; + bits_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNumBits()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasNumFuncs()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, numBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, numFuncs_); + } + for (int i = 0; i < bits_.size(); i++) { + output.writeInt64(3, bits_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, numBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, numFuncs_); + } + { + int dataSize = 0; + for (int i = 0; i < bits_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeInt64SizeNoTag(bits_.get(i)); + } + size += dataSize; + size += 1 * getBitsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numBits_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + numFuncs_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + bits_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.numBits_ = numBits_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.numFuncs_ = numFuncs_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = java.util.Collections.unmodifiableList(bits_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.bits_ = bits_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) { + return 
mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) return this; + if (other.hasNumBits()) { + setNumBits(other.getNumBits()); + } + if (other.hasNumFuncs()) { + setNumFuncs(other.getNumFuncs()); + } + if (!other.bits_.isEmpty()) { + if (bits_.isEmpty()) { + bits_ = other.bits_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureBitsIsMutable(); + bits_.addAll(other.bits_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNumBits()) { + + return false; + } + if (!hasNumFuncs()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required int32 num_bits = 1; + private int numBits_ ; + /** + * required int32 num_bits = 1; + */ + public boolean hasNumBits() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int32 num_bits = 1; + */ + public int getNumBits() { + return numBits_; + } + /** + * required int32 num_bits = 1; + */ + public Builder setNumBits(int value) { + bitField0_ |= 0x00000001; + numBits_ = value; + onChanged(); + return this; + } + /** + * required int32 num_bits = 1; + */ + public Builder clearNumBits() { + bitField0_ = (bitField0_ & ~0x00000001); + numBits_ = 0; + onChanged(); + return this; + } + + // required int32 num_funcs = 2; + private int numFuncs_ ; + /** + * required int32 num_funcs = 2; + */ + public boolean hasNumFuncs() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 num_funcs = 2; + */ + public int getNumFuncs() { + return numFuncs_; + } + /** + * required int32 num_funcs = 2; + */ + public Builder setNumFuncs(int value) { + bitField0_ |= 0x00000002; + numFuncs_ = value; + onChanged(); + return this; + } + /** + * required int32 num_funcs = 2; + */ + public Builder clearNumFuncs() { + bitField0_ = (bitField0_ & ~0x00000002); + numFuncs_ = 0; + onChanged(); + return this; + } + + // repeated int64 bits = 3; + private java.util.List bits_ = java.util.Collections.emptyList(); + private void ensureBitsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + bits_ = new java.util.ArrayList(bits_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated int64 bits = 3; + */ + public java.util.List + getBitsList() { + return java.util.Collections.unmodifiableList(bits_); + } + /** + * repeated int64 bits = 3; + */ + public int getBitsCount() { + return bits_.size(); + } + /** + * repeated int64 bits = 3; + */ + public long 
getBits(int index) { + return bits_.get(index); + } + /** + * repeated int64 bits = 3; + */ + public Builder setBits( + int index, long value) { + ensureBitsIsMutable(); + bits_.set(index, value); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder addBits(long value) { + ensureBitsIsMutable(); + bits_.add(value); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder addAllBits( + java.lang.Iterable values) { + ensureBitsIsMutable(); + super.addAll(values, bits_); + onChanged(); + return this; + } + /** + * repeated int64 bits = 3; + */ + public Builder clearBits() { + bits_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) + } + + static { + defaultInstance = new BloomFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter) + } + + private int bitField0_; + // required bytes db_name = 1; + public static final int DB_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString dbName_; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + + // required bytes table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + public static final int BLOOM_FILTER_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public boolean hasBloomFilter() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { + return bloomFilter_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { + return bloomFilter_; + } + + // required int64 aggregated_at = 4; + public static final int AGGREGATED_AT_FIELD_NUMBER = 4; + private long aggregatedAt_; + /** + * required int64 aggregated_at = 4; + */ + public boolean hasAggregatedAt() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required int64 aggregated_at = 4; + */ + public long getAggregatedAt() { + return aggregatedAt_; + } + + private void initFields() { + dbName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = com.google.protobuf.ByteString.EMPTY; + bloomFilter_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + aggregatedAt_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBloomFilter()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasAggregatedAt()) { + memoizedIsInitialized = 0; + return false; + } + if (!getBloomFilter().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, bloomFilter_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, aggregatedAt_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, bloomFilter_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, aggregatedAt_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBloomFilterFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() 
{ + super.clear(); + dbName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + if (bloomFilterBuilder_ == null) { + bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + } else { + bloomFilterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + aggregatedAt_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.dbName_ = dbName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (bloomFilterBuilder_ == null) { + result.bloomFilter_ = bloomFilter_; + } else { + result.bloomFilter_ = bloomFilterBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.aggregatedAt_ = aggregatedAt_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance()) return this; + if (other.hasDbName()) { + setDbName(other.getDbName()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasBloomFilter()) { + mergeBloomFilter(other.getBloomFilter()); + } + if (other.hasAggregatedAt()) { + setAggregatedAt(other.getAggregatedAt()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if 
(!hasBloomFilter()) { + + return false; + } + if (!hasAggregatedAt()) { + + return false; + } + if (!getBloomFilter().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes db_name = 1; + private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + /** + * required bytes db_name = 1; + */ + public Builder setDbName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + /** + * required bytes db_name = 1; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + */ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + /** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> bloomFilterBuilder_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public boolean hasBloomFilter() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required 
.org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() { + if (bloomFilterBuilder_ == null) { + return bloomFilter_; + } else { + return bloomFilterBuilder_.getMessage(); + } + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder setBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { + if (bloomFilterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + bloomFilter_ = value; + onChanged(); + } else { + bloomFilterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder setBloomFilter( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder builderForValue) { + if (bloomFilterBuilder_ == null) { + bloomFilter_ = builderForValue.build(); + onChanged(); + } else { + bloomFilterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder mergeBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) { + if (bloomFilterBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + bloomFilter_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) { + bloomFilter_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder(bloomFilter_).mergeFrom(value).buildPartial(); + } else { + bloomFilter_ = value; + } + onChanged(); + } else { + bloomFilterBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public Builder clearBloomFilter() { + if (bloomFilterBuilder_ == null) { + bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance(); + onChanged(); + } else { + bloomFilterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder getBloomFilterBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getBloomFilterFieldBuilder().getBuilder(); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() { + if (bloomFilterBuilder_ != null) { + return bloomFilterBuilder_.getMessageOrBuilder(); + } else { + return bloomFilter_; + } + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> + getBloomFilterFieldBuilder() { + if (bloomFilterBuilder_ == null) { + bloomFilterBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder>( + bloomFilter_, + getParentForChildren(), + isClean()); + bloomFilter_ = null; + } + return bloomFilterBuilder_; + } + + // required int64 aggregated_at = 4; + private long aggregatedAt_ ; + /** + * required int64 aggregated_at = 4; + */ + public boolean hasAggregatedAt() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required int64 aggregated_at = 4; + */ + public long getAggregatedAt() { + return aggregatedAt_; + } + /** + * required int64 aggregated_at = 4; + */ + public Builder setAggregatedAt(long value) { + bitField0_ |= 0x00000008; + aggregatedAt_ = value; + onChanged(); + return this; + } + /** + * required int64 aggregated_at = 4; + */ + public Builder clearAggregatedAt() { + bitField0_ = (bitField0_ & ~0x00000008); + aggregatedAt_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) + } + + static { + defaultInstance = new AggrStatsBloomFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter) + } + + public interface AggrStatsInvalidatorFilterOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + java.util.List + getToInvalidateList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + int getToInvalidateCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + java.util.List + getToInvalidateOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index); + + // required int64 run_every = 2; + /** + * required int64 run_every = 2; + */ + boolean hasRunEvery(); + /** + * required int64 run_every = 2; + */ + long getRunEvery(); + + // required int64 max_cache_entry_life = 3; + /** + * required int64 max_cache_entry_life = 3; + */ + boolean hasMaxCacheEntryLife(); + /** + * required int64 max_cache_entry_life = 3; + */ + long getMaxCacheEntryLife(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} + */ + public static final class AggrStatsInvalidatorFilter extends + 
com.google.protobuf.GeneratedMessage + implements AggrStatsInvalidatorFilterOrBuilder { + // Use AggrStatsInvalidatorFilter.newBuilder() to construct. + private AggrStatsInvalidatorFilter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AggrStatsInvalidatorFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AggrStatsInvalidatorFilter defaultInstance; + public static AggrStatsInvalidatorFilter getDefaultInstance() { + return defaultInstance; + } + + public AggrStatsInvalidatorFilter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AggrStatsInvalidatorFilter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + toInvalidate_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.PARSER, extensionRegistry)); + break; + } + case 16: { + bitField0_ |= 0x00000001; + runEvery_ = input.readInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000002; + maxCacheEntryLife_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggrStatsInvalidatorFilter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggrStatsInvalidatorFilter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface EntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes db_name = 1; + /** + * required bytes db_name = 1; + */ + boolean hasDbName(); + /** + * required bytes db_name = 1; + */ + com.google.protobuf.ByteString getDbName(); + + // required bytes table_name = 2; + /** + * required bytes table_name = 2; + */ + boolean hasTableName(); + /** + * required bytes table_name = 2; + */ + com.google.protobuf.ByteString getTableName(); + + // required bytes part_name = 3; + /** + * required bytes part_name = 3; + */ + boolean hasPartName(); + /** + * required bytes part_name = 3; + */ + com.google.protobuf.ByteString getPartName(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} + */ + public static final class Entry extends + com.google.protobuf.GeneratedMessage + implements EntryOrBuilder { + // Use Entry.newBuilder() to construct. + private Entry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Entry defaultInstance; + public static Entry getDefaultInstance() { + return defaultInstance; + } + + public Entry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Entry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + dbName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + tableName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + partName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Entry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Entry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes db_name = 1; + public static final int DB_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString dbName_; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + + // required bytes table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString tableName_; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required bytes part_name = 3; + public static final int PART_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString partName_; + /** + * required bytes part_name = 3; + */ + public boolean hasPartName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes part_name = 3; + */ + public com.google.protobuf.ByteString getPartName() { + return partName_; + } + + private void initFields() { + dbName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = com.google.protobuf.ByteString.EMPTY; + partName_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasDbName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPartName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, partName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, dbName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeBytesSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, partName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry prototype) { + return 
newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + dbName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + partName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry(this); + int from_bitField0_ = 
bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.dbName_ = dbName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.partName_ = partName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()) return this; + if (other.hasDbName()) { + setDbName(other.getDbName()); + } + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasPartName()) { + setPartName(other.getPartName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasDbName()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasPartName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes db_name = 1; + private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes db_name = 1; + */ + public boolean hasDbName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes db_name = 1; + */ + public com.google.protobuf.ByteString getDbName() { + return dbName_; + } + /** + * required bytes db_name = 1; + */ + public Builder setDbName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + dbName_ = value; + onChanged(); + return this; + } + /** + * required bytes db_name = 1; + */ + public Builder clearDbName() { + bitField0_ = (bitField0_ & ~0x00000001); + dbName_ = getDefaultInstance().getDbName(); + onChanged(); + return this; + } + + // required bytes table_name = 2; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bytes table_name = 2; + */ + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + /** + * required bytes table_name = 2; + 
*/ + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + tableName_ = value; + onChanged(); + return this; + } + /** + * required bytes table_name = 2; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000002); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required bytes part_name = 3; + private com.google.protobuf.ByteString partName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes part_name = 3; + */ + public boolean hasPartName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes part_name = 3; + */ + public com.google.protobuf.ByteString getPartName() { + return partName_; + } + /** + * required bytes part_name = 3; + */ + public Builder setPartName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + partName_ = value; + onChanged(); + return this; + } + /** + * required bytes part_name = 3; + */ + public Builder clearPartName() { + bitField0_ = (bitField0_ & ~0x00000004); + partName_ = getDefaultInstance().getPartName(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) + } + + static { + defaultInstance = new Entry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry) + } + + private int bitField0_; + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + public static final int TO_INVALIDATE_FIELD_NUMBER = 1; + private java.util.List toInvalidate_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List getToInvalidateList() { + return toInvalidate_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateOrBuilderList() { + return toInvalidate_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public int getToInvalidateCount() { + return toInvalidate_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { + return toInvalidate_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index) { + return toInvalidate_.get(index); + } + + // required int64 run_every = 2; + public static final int RUN_EVERY_FIELD_NUMBER = 2; + private long runEvery_; + /** + * required int64 run_every = 2; + */ + public boolean hasRunEvery() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required int64 run_every = 2; + */ + public long getRunEvery() { + return runEvery_; + } + + // required int64 max_cache_entry_life = 3; + public static final int MAX_CACHE_ENTRY_LIFE_FIELD_NUMBER = 3; + private long maxCacheEntryLife_; + /** + * required int64 
max_cache_entry_life = 3; + */ + public boolean hasMaxCacheEntryLife() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 max_cache_entry_life = 3; + */ + public long getMaxCacheEntryLife() { + return maxCacheEntryLife_; + } + + private void initFields() { + toInvalidate_ = java.util.Collections.emptyList(); + runEvery_ = 0L; + maxCacheEntryLife_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRunEvery()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMaxCacheEntryLife()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getToInvalidateCount(); i++) { + if (!getToInvalidate(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < toInvalidate_.size(); i++) { + output.writeMessage(1, toInvalidate_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(2, runEvery_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(3, maxCacheEntryLife_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < toInvalidate_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, toInvalidate_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, runEvery_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, maxCacheEntryLife_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom(java.io.InputStream input) + throws 
java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getToInvalidateFieldBuilder(); + } + } + private 
static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (toInvalidateBuilder_ == null) { + toInvalidate_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + toInvalidateBuilder_.clear(); + } + runEvery_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + maxCacheEntryLife_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (toInvalidateBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = java.util.Collections.unmodifiableList(toInvalidate_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.toInvalidate_ = toInvalidate_; + } else { + result.toInvalidate_ = toInvalidateBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.runEvery_ = runEvery_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.maxCacheEntryLife_ = maxCacheEntryLife_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.getDefaultInstance()) return this; + if (toInvalidateBuilder_ == null) { + if (!other.toInvalidate_.isEmpty()) { + if (toInvalidate_.isEmpty()) { + toInvalidate_ = other.toInvalidate_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureToInvalidateIsMutable(); + toInvalidate_.addAll(other.toInvalidate_); + } + onChanged(); + } + } else { + if (!other.toInvalidate_.isEmpty()) { + if (toInvalidateBuilder_.isEmpty()) { + toInvalidateBuilder_.dispose(); + toInvalidateBuilder_ = null; + toInvalidate_ = other.toInvalidate_; + bitField0_ = (bitField0_ & ~0x00000001); + toInvalidateBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getToInvalidateFieldBuilder() : null; + } else { + toInvalidateBuilder_.addAllMessages(other.toInvalidate_); + } + } + } + if (other.hasRunEvery()) { + setRunEvery(other.getRunEvery()); + } + if (other.hasMaxCacheEntryLife()) { + setMaxCacheEntryLife(other.getMaxCacheEntryLife()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRunEvery()) { + + return false; + } + if (!hasMaxCacheEntryLife()) { + + return false; + } + for (int i = 0; i < getToInvalidateCount(); i++) { + if (!getToInvalidate(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + private java.util.List toInvalidate_ = + java.util.Collections.emptyList(); + private void ensureToInvalidateIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + toInvalidate_ = new java.util.ArrayList(toInvalidate_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> toInvalidateBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List getToInvalidateList() { + if (toInvalidateBuilder_ == null) { + return java.util.Collections.unmodifiableList(toInvalidate_); + } else { + return toInvalidateBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public int getToInvalidateCount() { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.size(); + } else { + return toInvalidateBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry getToInvalidate(int index) { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.get(index); + } else { + return toInvalidateBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder setToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
ensureToInvalidateIsMutable(); + toInvalidate_.set(index, value); + onChanged(); + } else { + toInvalidateBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder setToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.set(index, builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureToInvalidateIsMutable(); + toInvalidate_.add(value); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry value) { + if (toInvalidateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureToInvalidateIsMutable(); + toInvalidate_.add(index, value); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.add(builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addToInvalidate( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder builderForValue) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.add(index, builderForValue.build()); + onChanged(); + } else { + toInvalidateBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder addAllToInvalidate( + java.lang.Iterable values) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + super.addAll(values, toInvalidate_); + onChanged(); + } else { + toInvalidateBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder clearToInvalidate() { + if (toInvalidateBuilder_ == null) { + toInvalidate_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + toInvalidateBuilder_.clear(); + } + return this; + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public Builder removeToInvalidate(int index) { + if (toInvalidateBuilder_ == null) { + ensureToInvalidateIsMutable(); + toInvalidate_.remove(index); + onChanged(); + } else { + toInvalidateBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder getToInvalidateBuilder( + int index) { + return getToInvalidateFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder getToInvalidateOrBuilder( + int index) { + if (toInvalidateBuilder_ == null) { + return toInvalidate_.get(index); } else { + return toInvalidateBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateOrBuilderList() { + if (toInvalidateBuilder_ != null) { + return toInvalidateBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(toInvalidate_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder() { + return getToInvalidateFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder addToInvalidateBuilder( + int index) { + return getToInvalidateFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter.Entry to_invalidate = 1; + */ + public java.util.List + getToInvalidateBuilderList() { + return getToInvalidateFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder> + getToInvalidateFieldBuilder() { + if (toInvalidateBuilder_ == null) { + toInvalidateBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsInvalidatorFilter.EntryOrBuilder>( + toInvalidate_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + toInvalidate_ = null; + } + return toInvalidateBuilder_; + } + + // required int64 run_every = 2; + private long runEvery_ ; + 
/** + * required int64 run_every = 2; + */ + public boolean hasRunEvery() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int64 run_every = 2; + */ + public long getRunEvery() { + return runEvery_; + } + /** + * required int64 run_every = 2; + */ + public Builder setRunEvery(long value) { + bitField0_ |= 0x00000002; + runEvery_ = value; + onChanged(); + return this; + } + /** + * required int64 run_every = 2; + */ + public Builder clearRunEvery() { + bitField0_ = (bitField0_ & ~0x00000002); + runEvery_ = 0L; + onChanged(); + return this; + } + + // required int64 max_cache_entry_life = 3; + private long maxCacheEntryLife_ ; + /** + * required int64 max_cache_entry_life = 3; + */ + public boolean hasMaxCacheEntryLife() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required int64 max_cache_entry_life = 3; + */ + public long getMaxCacheEntryLife() { + return maxCacheEntryLife_; + } + /** + * required int64 max_cache_entry_life = 3; + */ + public Builder setMaxCacheEntryLife(long value) { + bitField0_ |= 0x00000004; + maxCacheEntryLife_ = value; + onChanged(); + return this; + } + /** + * required int64 max_cache_entry_life = 3; + */ + public Builder clearMaxCacheEntryLife() { + bitField0_ = (bitField0_ & ~0x00000004); + maxCacheEntryLife_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) + } + + static { + defaultInstance = new AggrStatsInvalidatorFilter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsInvalidatorFilter) + } + + public interface ColumnStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 last_analyzed = 1; + /** + * optional int64 last_analyzed = 1; + */ + boolean hasLastAnalyzed(); + /** + * optional int64 last_analyzed = 1; + */ + long getLastAnalyzed(); + + // required string column_type = 2; + /** + * required string column_type = 2; + */ + boolean hasColumnType(); + /** + * required string column_type = 2; + */ + java.lang.String getColumnType(); + /** + * required string column_type = 2; + */ + com.google.protobuf.ByteString + getColumnTypeBytes(); + + // optional int64 num_nulls = 3; + /** + * optional int64 num_nulls = 3; + */ + boolean hasNumNulls(); + /** + * optional int64 num_nulls = 3; + */ + long getNumNulls(); + + // optional int64 num_distinct_values = 4; + /** + * optional int64 num_distinct_values = 4; + */ + boolean hasNumDistinctValues(); + /** + * optional int64 num_distinct_values = 4; + */ + long getNumDistinctValues(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + boolean hasBoolStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + boolean hasLongStats(); + 
/** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + boolean hasDoubleStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + boolean hasStringStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + boolean hasBinaryStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + boolean hasDecimalStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder(); + + // optional string column_name = 11; + /** + * optional string column_name = 11; + */ + boolean hasColumnName(); + /** + * optional string column_name = 11; + */ + java.lang.String getColumnName(); + /** + * optional string column_name = 11; + */ + com.google.protobuf.ByteString + getColumnNameBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} + */ + public static final class ColumnStats extends 
+ com.google.protobuf.GeneratedMessage + implements ColumnStatsOrBuilder { + // Use ColumnStats.newBuilder() to construct. + private ColumnStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ColumnStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ColumnStats defaultInstance; + public static ColumnStats getDefaultInstance() { + return defaultInstance; + } + + public ColumnStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ColumnStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lastAnalyzed_ = input.readInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + columnType_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + numNulls_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + numDistinctValues_ = input.readInt64(); + break; + } + case 42: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = boolStats_.toBuilder(); + } + boolStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(boolStats_); + boolStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = longStats_.toBuilder(); + } + longStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(longStats_); + longStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 58: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + subBuilder = doubleStats_.toBuilder(); + } + doubleStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(doubleStats_); + doubleStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000040; + break; + } + case 66: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = stringStats_.toBuilder(); + } + 
stringStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stringStats_); + stringStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = binaryStats_.toBuilder(); + } + binaryStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(binaryStats_); + binaryStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } + case 82: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder subBuilder = null; + if (((bitField0_ & 0x00000200) == 0x00000200)) { + subBuilder = decimalStats_.toBuilder(); + } + decimalStats_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(decimalStats_); + decimalStats_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000200; + break; + } + case 90: { + bitField0_ |= 0x00000400; + columnName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ColumnStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ColumnStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface BooleanStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 num_trues = 1; + /** + * optional int64 num_trues = 1; + */ + boolean hasNumTrues(); + /** + * optional int64 num_trues = 1; + */ + long getNumTrues(); + + // optional int64 num_falses = 2; + /** + * optional int64 num_falses = 2; + */ + boolean hasNumFalses(); + /** + * optional int64 num_falses = 2; + */ + long getNumFalses(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} + */ + public static final class 
BooleanStats extends + com.google.protobuf.GeneratedMessage + implements BooleanStatsOrBuilder { + // Use BooleanStats.newBuilder() to construct. + private BooleanStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BooleanStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BooleanStats defaultInstance; + public static BooleanStats getDefaultInstance() { + return defaultInstance; + } + + public BooleanStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BooleanStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + numTrues_ = input.readInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + numFalses_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BooleanStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BooleanStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 num_trues = 1; + public static final int NUM_TRUES_FIELD_NUMBER = 1; + private long numTrues_; + /** + * optional int64 num_trues = 1; + */ + public boolean hasNumTrues() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 num_trues = 1; + */ + public long getNumTrues() 
{ + return numTrues_; + } + + // optional int64 num_falses = 2; + public static final int NUM_FALSES_FIELD_NUMBER = 2; + private long numFalses_; + /** + * optional int64 num_falses = 2; + */ + public boolean hasNumFalses() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 num_falses = 2; + */ + public long getNumFalses() { + return numFalses_; + } + + private void initFields() { + numTrues_ = 0L; + numFalses_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, numTrues_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, numFalses_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, numTrues_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, numFalses_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom(java.io.InputStream input) + throws 
java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + numTrues_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + numFalses_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + } + 
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.numTrues_ = numTrues_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.numFalses_ = numFalses_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) return this; + if (other.hasNumTrues()) { + setNumTrues(other.getNumTrues()); + } + if (other.hasNumFalses()) { + setNumFalses(other.getNumFalses()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 num_trues = 1; + private long numTrues_ ; + /** + * optional int64 num_trues = 1; + */ + public boolean hasNumTrues() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 num_trues = 1; + */ + public long getNumTrues() { + return numTrues_; + } + /** + * optional int64 num_trues = 1; + */ + public Builder setNumTrues(long value) { + bitField0_ |= 0x00000001; + numTrues_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_trues = 1; + */ + public Builder clearNumTrues() { + bitField0_ = (bitField0_ & ~0x00000001); + numTrues_ = 0L; + onChanged(); + return this; + } + + // optional int64 num_falses = 2; + private long numFalses_ ; + /** + * optional int64 
num_falses = 2; + */ + public boolean hasNumFalses() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 num_falses = 2; + */ + public long getNumFalses() { + return numFalses_; + } + /** + * optional int64 num_falses = 2; + */ + public Builder setNumFalses(long value) { + bitField0_ |= 0x00000002; + numFalses_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_falses = 2; + */ + public Builder clearNumFalses() { + bitField0_ = (bitField0_ & ~0x00000002); + numFalses_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) + } + + static { + defaultInstance = new BooleanStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats) + } + + public interface LongStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional sint64 low_value = 1; + /** + * optional sint64 low_value = 1; + */ + boolean hasLowValue(); + /** + * optional sint64 low_value = 1; + */ + long getLowValue(); + + // optional sint64 high_value = 2; + /** + * optional sint64 high_value = 2; + */ + boolean hasHighValue(); + /** + * optional sint64 high_value = 2; + */ + long getHighValue(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} + */ + public static final class LongStats extends + com.google.protobuf.GeneratedMessage + implements LongStatsOrBuilder { + // Use LongStats.newBuilder() to construct. + private LongStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private LongStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LongStats defaultInstance; + public static LongStats getDefaultInstance() { + return defaultInstance; + } + + public LongStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LongStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lowValue_ = input.readSInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + highValue_ = input.readSInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LongStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LongStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional sint64 low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private long lowValue_; + /** + * optional sint64 low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 low_value = 1; + */ + public long getLowValue() { + return lowValue_; + } + + // optional sint64 high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private long highValue_; + /** + * optional sint64 high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 high_value = 2; + */ + public long getHighValue() { + return highValue_; + } + + private void initFields() { + lowValue_ = 0L; + highValue_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeSInt64(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt64(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lowValue_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + highValue_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lowValue_ = lowValue_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.highValue_ = highValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + setLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + setHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public 
Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional sint64 low_value = 1; + private long lowValue_ ; + /** + * optional sint64 low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 low_value = 1; + */ + public long getLowValue() { + return lowValue_; + } + /** + * optional sint64 low_value = 1; + */ + public Builder setLowValue(long value) { + bitField0_ |= 0x00000001; + lowValue_ = value; + onChanged(); + return this; + } + /** + * optional sint64 low_value = 1; + */ + public Builder clearLowValue() { + bitField0_ = (bitField0_ & ~0x00000001); + lowValue_ = 0L; + onChanged(); + return this; + } + + // optional sint64 high_value = 2; + private long highValue_ ; + /** + * optional sint64 high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 high_value = 2; + */ + public long getHighValue() { + return highValue_; + } + /** + * optional sint64 high_value = 2; + */ + public Builder setHighValue(long value) { + bitField0_ |= 0x00000002; + highValue_ = value; + onChanged(); + return this; + } + /** + * optional sint64 high_value = 2; + */ + public Builder clearHighValue() { + bitField0_ = (bitField0_ & ~0x00000002); + highValue_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) + } + + static { + defaultInstance = new LongStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats) + } + + public interface DoubleStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional double low_value = 1; + /** + * optional double low_value = 1; + */ + boolean hasLowValue(); + /** + * optional double low_value = 1; + */ + double getLowValue(); + + // optional double high_value = 2; + /** + * optional double high_value = 2; + */ + boolean hasHighValue(); + /** + * optional double high_value = 2; + */ + double getHighValue(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} + */ + public static final class DoubleStats extends + com.google.protobuf.GeneratedMessage + implements DoubleStatsOrBuilder { + // Use DoubleStats.newBuilder() to construct. 
+ private DoubleStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DoubleStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DoubleStats defaultInstance; + public static DoubleStats getDefaultInstance() { + return defaultInstance; + } + + public DoubleStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DoubleStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + lowValue_ = input.readDouble(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + highValue_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DoubleStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DoubleStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional double low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private double lowValue_; + /** + * optional double low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double low_value = 1; + */ + public double getLowValue() { + return lowValue_; + } + + // optional double high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private double highValue_; 
+ /** + * optional double high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double high_value = 2; + */ + public double getHighValue() { + return highValue_; + } + + private void initFields() { + lowValue_ = 0D; + highValue_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lowValue_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + highValue_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats 
getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lowValue_ = lowValue_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.highValue_ = highValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + setLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + setHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional double low_value = 1; + private double lowValue_ ; + /** + * optional double low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double low_value = 1; + */ + public double getLowValue() { + return lowValue_; + } + /** + * optional double low_value = 1; + */ + public Builder setLowValue(double value) { + bitField0_ |= 0x00000001; + lowValue_ = value; + onChanged(); + return this; + } + /** + * optional double low_value = 1; + */ + public Builder clearLowValue() { + bitField0_ = (bitField0_ & ~0x00000001); + lowValue_ = 0D; + onChanged(); + return this; + } + + // optional double high_value = 2; + private double highValue_ ; + /** + * optional double high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 
0x00000002); + } + /** + * optional double high_value = 2; + */ + public double getHighValue() { + return highValue_; + } + /** + * optional double high_value = 2; + */ + public Builder setHighValue(double value) { + bitField0_ |= 0x00000002; + highValue_ = value; + onChanged(); + return this; + } + /** + * optional double high_value = 2; + */ + public Builder clearHighValue() { + bitField0_ = (bitField0_ & ~0x00000002); + highValue_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) + } + + static { + defaultInstance = new DoubleStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats) + } + + public interface StringStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 max_col_length = 1; + /** + * optional int64 max_col_length = 1; + */ + boolean hasMaxColLength(); + /** + * optional int64 max_col_length = 1; + */ + long getMaxColLength(); + + // optional double avg_col_length = 2; + /** + * optional double avg_col_length = 2; + */ + boolean hasAvgColLength(); + /** + * optional double avg_col_length = 2; + */ + double getAvgColLength(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + */ + public static final class StringStats extends + com.google.protobuf.GeneratedMessage + implements StringStatsOrBuilder { + // Use StringStats.newBuilder() to construct. + private StringStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StringStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StringStats defaultInstance; + public static StringStats getDefaultInstance() { + return defaultInstance; + } + + public StringStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StringStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + maxColLength_ = input.readInt64(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + avgColLength_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StringStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StringStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 max_col_length = 1; + public static final int MAX_COL_LENGTH_FIELD_NUMBER = 1; + private long maxColLength_; + /** + * optional int64 max_col_length = 1; + */ + public boolean hasMaxColLength() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 max_col_length = 1; + */ + public long getMaxColLength() { + return maxColLength_; + } + + // optional double avg_col_length = 2; + public static final int AVG_COL_LENGTH_FIELD_NUMBER = 2; + private double avgColLength_; + /** + * optional double avg_col_length = 2; + */ + public boolean hasAvgColLength() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double avg_col_length = 2; + */ + public double getAvgColLength() { + return avgColLength_; + } + + private void initFields() { + maxColLength_ = 0L; + avgColLength_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, maxColLength_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, avgColLength_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, maxColLength_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, avgColLength_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + maxColLength_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + avgColLength_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.maxColLength_ = maxColLength_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.avgColLength_ = avgColLength_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) return this; + if 
(other.hasMaxColLength()) { + setMaxColLength(other.getMaxColLength()); + } + if (other.hasAvgColLength()) { + setAvgColLength(other.getAvgColLength()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 max_col_length = 1; + private long maxColLength_ ; + /** + * optional int64 max_col_length = 1; + */ + public boolean hasMaxColLength() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 max_col_length = 1; + */ + public long getMaxColLength() { + return maxColLength_; + } + /** + * optional int64 max_col_length = 1; + */ + public Builder setMaxColLength(long value) { + bitField0_ |= 0x00000001; + maxColLength_ = value; + onChanged(); + return this; + } + /** + * optional int64 max_col_length = 1; + */ + public Builder clearMaxColLength() { + bitField0_ = (bitField0_ & ~0x00000001); + maxColLength_ = 0L; + onChanged(); + return this; + } + + // optional double avg_col_length = 2; + private double avgColLength_ ; + /** + * optional double avg_col_length = 2; + */ + public boolean hasAvgColLength() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double avg_col_length = 2; + */ + public double getAvgColLength() { + return avgColLength_; + } + /** + * optional double avg_col_length = 2; + */ + public Builder setAvgColLength(double value) { + bitField0_ |= 0x00000002; + avgColLength_ = value; + onChanged(); + return this; + } + /** + * optional double avg_col_length = 2; + */ + public Builder clearAvgColLength() { + bitField0_ = (bitField0_ & ~0x00000002); + avgColLength_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) + } + + static { + defaultInstance = new StringStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats) + } + + public interface DecimalStatsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + boolean hasLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder(); + + // optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + boolean hasHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} + */ + public static final class DecimalStats extends + com.google.protobuf.GeneratedMessage + implements DecimalStatsOrBuilder { + // Use DecimalStats.newBuilder() to construct. + private DecimalStats(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DecimalStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DecimalStats defaultInstance; + public static DecimalStats getDefaultInstance() { + return defaultInstance; + } + + public DecimalStats getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DecimalStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = lowValue_.toBuilder(); + } + lowValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(lowValue_); + lowValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = highValue_.toBuilder(); + } + highValue_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(highValue_); + highValue_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DecimalStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DecimalStats(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface DecimalOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes unscaled = 1; + /** + * required bytes unscaled = 1; + */ + boolean hasUnscaled(); + /** + * required bytes unscaled = 1; + */ + com.google.protobuf.ByteString getUnscaled(); + + // required int32 scale = 2; + /** + * required int32 scale = 2; + */ + boolean hasScale(); + /** + * required int32 scale = 2; + */ + int getScale(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} + */ + public static final class Decimal extends + com.google.protobuf.GeneratedMessage + implements DecimalOrBuilder { + // Use Decimal.newBuilder() to construct. 
+ private Decimal(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Decimal(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Decimal defaultInstance; + public static Decimal getDefaultInstance() { + return defaultInstance; + } + + public Decimal getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Decimal( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + unscaled_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + scale_ = input.readInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Decimal parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Decimal(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes unscaled = 1; + public static final int UNSCALED_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString unscaled_; + /** + * required bytes unscaled = 1; + */ + public boolean hasUnscaled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes unscaled = 1; + */ + public com.google.protobuf.ByteString getUnscaled() { + return unscaled_; + } + + // required int32 scale = 2; + public static final int SCALE_FIELD_NUMBER = 2; 
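As a reading aid for the nested Decimal message above (a required unscaled byte string plus a required int32 scale) and the enclosing DecimalStats whose builder setters appear later in this hunk, a minimal sketch follows. It is illustrative only, not part of the patch; DecimalStatsUsageExample is a hypothetical class name, and the conversion from java.math.BigDecimal is an assumption about how callers would populate these fields.

    import com.google.protobuf.ByteString;
    import java.math.BigDecimal;
    import java.math.BigInteger;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal;

    public class DecimalStatsUsageExample {
      public static void main(String[] args) {
        BigDecimal low = new BigDecimal("12.34");

        // Both Decimal fields are required, so unscaled bytes and scale must be set before build().
        Decimal lowValue = Decimal.newBuilder()
            .setUnscaled(ByteString.copyFrom(low.unscaledValue().toByteArray()))
            .setScale(low.scale())
            .build();

        // DecimalStats nests Decimal messages in its optional low_value / high_value fields.
        DecimalStats stats = DecimalStats.newBuilder()
            .setLowValue(lowValue)
            .build();

        // Reconstruct the BigDecimal from the stored unscaled bytes and scale.
        BigDecimal roundTrip = new BigDecimal(
            new BigInteger(stats.getLowValue().getUnscaled().toByteArray()),
            stats.getLowValue().getScale());
        System.out.println(roundTrip);
      }
    }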
+ private int scale_; + /** + * required int32 scale = 2; + */ + public boolean hasScale() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 scale = 2; + */ + public int getScale() { + return scale_; + } + + private void initFields() { + unscaled_ = com.google.protobuf.ByteString.EMPTY; + scale_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUnscaled()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasScale()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, unscaled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt32(2, scale_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, unscaled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(2, scale_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + unscaled_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + scale_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + 
getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.unscaled_ = unscaled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.scale_ = scale_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) return this; + if (other.hasUnscaled()) { + setUnscaled(other.getUnscaled()); + } + if (other.hasScale()) { + setScale(other.getScale()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUnscaled()) { + + return false; + } + if (!hasScale()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes unscaled = 1; + private com.google.protobuf.ByteString unscaled_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes unscaled = 1; + */ + public boolean hasUnscaled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes unscaled = 1; + */ + public com.google.protobuf.ByteString getUnscaled() { + 
return unscaled_; + } + /** + * required bytes unscaled = 1; + */ + public Builder setUnscaled(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + unscaled_ = value; + onChanged(); + return this; + } + /** + * required bytes unscaled = 1; + */ + public Builder clearUnscaled() { + bitField0_ = (bitField0_ & ~0x00000001); + unscaled_ = getDefaultInstance().getUnscaled(); + onChanged(); + return this; + } + + // required int32 scale = 2; + private int scale_ ; + /** + * required int32 scale = 2; + */ + public boolean hasScale() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required int32 scale = 2; + */ + public int getScale() { + return scale_; + } + /** + * required int32 scale = 2; + */ + public Builder setScale(int value) { + bitField0_ |= 0x00000002; + scale_ = value; + onChanged(); + return this; + } + /** + * required int32 scale = 2; + */ + public Builder clearScale() { + bitField0_ = (bitField0_ & ~0x00000002); + scale_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) + } + + static { + defaultInstance = new Decimal(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal) + } + + private int bitField0_; + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + public static final int LOW_VALUE_FIELD_NUMBER = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { + return lowValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { + return lowValue_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + public static final int HIGH_VALUE_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { + return highValue_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { + return highValue_; + } + + private void initFields() { + lowValue_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, highValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, lowValue_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, highValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom(java.io.InputStream 
input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLowValueFieldBuilder(); + getHighValueFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } else { + lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (highValueBuilder_ == null) { + highValue_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (lowValueBuilder_ == null) { + result.lowValue_ = lowValue_; + } else { + result.lowValue_ = lowValueBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (highValueBuilder_ == null) { + result.highValue_ = highValue_; + } else { + result.highValue_ = highValueBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) return this; + if (other.hasLowValue()) { + mergeLowValue(other.getLowValue()); + } + if (other.hasHighValue()) { + mergeHighValue(other.getHighValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasLowValue()) { + if (!getLowValue().isInitialized()) { + + return false; + } + } + if (hasHighValue()) { + if (!getHighValue().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> lowValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public boolean hasLowValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getLowValue() { + if (lowValueBuilder_ == null) { + return lowValue_; + } else { + return lowValueBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder setLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (lowValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + lowValue_ = value; + onChanged(); + } else { + lowValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder setLowValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { + if (lowValueBuilder_ == null) { + lowValue_ = builderForValue.build(); + onChanged(); + } else { + lowValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder mergeLowValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (lowValueBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + lowValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { + lowValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(lowValue_).mergeFrom(value).buildPartial(); + } else { + lowValue_ = value; + } + onChanged(); + } else { + lowValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public Builder clearLowValue() { + if (lowValueBuilder_ == null) { + lowValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + onChanged(); + } else { + 
lowValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getLowValueBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getLowValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getLowValueOrBuilder() { + if (lowValueBuilder_ != null) { + return lowValueBuilder_.getMessageOrBuilder(); + } else { + return lowValue_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal low_value = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> + getLowValueFieldBuilder() { + if (lowValueBuilder_ == null) { + lowValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( + lowValue_, + getParentForChildren(), + isClean()); + lowValue_ = null; + } + return lowValueBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> highValueBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public boolean hasHighValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal getHighValue() { + if (highValueBuilder_ == null) { + return highValue_; + } else { + return highValueBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder setHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (highValueBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + highValue_ = value; + onChanged(); + } else { + highValueBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder setHighValue( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder builderForValue) { + if (highValueBuilder_ == null) { + highValue_ = builderForValue.build(); + onChanged(); + } else { + highValueBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder mergeHighValue(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal value) { + if (highValueBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + highValue_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance()) { + highValue_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder(highValue_).mergeFrom(value).buildPartial(); + } else { + highValue_ = value; + } + onChanged(); + } else { + highValueBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public Builder clearHighValue() { + if (highValueBuilder_ == null) { + highValue_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.getDefaultInstance(); + onChanged(); + } else { + highValueBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder getHighValueBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getHighValueFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder getHighValueOrBuilder() { + if (highValueBuilder_ != null) { + return highValueBuilder_.getMessageOrBuilder(); + } else { + return highValue_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats.Decimal high_value = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder> + getHighValueFieldBuilder() { + if (highValueBuilder_ == null) { + highValueBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.DecimalOrBuilder>( + highValue_, + getParentForChildren(), + isClean()); + highValue_ = null; + } + return highValueBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) + } + + static { + defaultInstance = new 
DecimalStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats) + } + + private int bitField0_; + // optional int64 last_analyzed = 1; + public static final int LAST_ANALYZED_FIELD_NUMBER = 1; + private long lastAnalyzed_; + /** + * optional int64 last_analyzed = 1; + */ + public boolean hasLastAnalyzed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 last_analyzed = 1; + */ + public long getLastAnalyzed() { + return lastAnalyzed_; + } + + // required string column_type = 2; + public static final int COLUMN_TYPE_FIELD_NUMBER = 2; + private java.lang.Object columnType_; + /** + * required string column_type = 2; + */ + public boolean hasColumnType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string column_type = 2; + */ + public java.lang.String getColumnType() { + java.lang.Object ref = columnType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnType_ = s; + } + return s; + } + } + /** + * required string column_type = 2; + */ + public com.google.protobuf.ByteString + getColumnTypeBytes() { + java.lang.Object ref = columnType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 num_nulls = 3; + public static final int NUM_NULLS_FIELD_NUMBER = 3; + private long numNulls_; + /** + * optional int64 num_nulls = 3; + */ + public boolean hasNumNulls() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 num_nulls = 3; + */ + public long getNumNulls() { + return numNulls_; + } + + // optional int64 num_distinct_values = 4; + public static final int NUM_DISTINCT_VALUES_FIELD_NUMBER = 4; + private long numDistinctValues_; + /** + * optional int64 num_distinct_values = 4; + */ + public boolean hasNumDistinctValues() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 num_distinct_values = 4; + */ + public long getNumDistinctValues() { + return numDistinctValues_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + public static final int BOOL_STATS_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public boolean hasBoolStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { + return boolStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { + return boolStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + public static final int LONG_STATS_FIELD_NUMBER = 6; + private 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public boolean hasLongStats() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats() { + return longStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { + return longStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + public static final int DOUBLE_STATS_FIELD_NUMBER = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public boolean hasDoubleStats() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { + return doubleStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { + return doubleStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + public static final int STRING_STATS_FIELD_NUMBER = 8; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public boolean hasStringStats() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats() { + return stringStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { + return stringStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + public static final int BINARY_STATS_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public boolean hasBinaryStats() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { + return binaryStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { + return binaryStats_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + public static final int DECIMAL_STATS_FIELD_NUMBER = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public boolean hasDecimalStats() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { + return decimalStats_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { + return decimalStats_; + } + + // optional string column_name = 11; + public static final int COLUMN_NAME_FIELD_NUMBER = 11; + private java.lang.Object columnName_; + /** + * optional string column_name = 11; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string column_name = 11; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnName_ = s; + } + return s; + } + } + /** + * optional string column_name = 11; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + lastAnalyzed_ = 0L; + columnType_ = ""; + numNulls_ = 0L; + numDistinctValues_ = 0L; + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + columnName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasColumnType()) { + memoizedIsInitialized = 0; + return false; + } + if (hasDecimalStats()) { + if (!getDecimalStats().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, lastAnalyzed_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getColumnTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, numNulls_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, numDistinctValues_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, boolStats_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, longStats_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeMessage(7, doubleStats_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(8, stringStats_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(9, binaryStats_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeMessage(10, decimalStats_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(11, getColumnNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, lastAnalyzed_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getColumnTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, numNulls_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, numDistinctValues_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, boolStats_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, longStats_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, doubleStats_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, stringStats_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, binaryStats_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, decimalStats_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getColumnNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ColumnStats} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBoolStatsFieldBuilder(); + getLongStatsFieldBuilder(); + getDoubleStatsFieldBuilder(); + getStringStatsFieldBuilder(); + getBinaryStatsFieldBuilder(); + getDecimalStatsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lastAnalyzed_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + columnType_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + numNulls_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + numDistinctValues_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + if (boolStatsBuilder_ == null) { + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + } else { + boolStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + if (longStatsBuilder_ == null) { + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + } else { + longStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + if (doubleStatsBuilder_ == null) { + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + } else { + doubleStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + if (stringStatsBuilder_ == null) { + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } else { + stringStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + if (binaryStatsBuilder_ == null) { + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + } else { + binaryStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + if (decimalStatsBuilder_ == null) { + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + } else { + decimalStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + columnName_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats build() { + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lastAnalyzed_ = lastAnalyzed_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.columnType_ = columnType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.numNulls_ = numNulls_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.numDistinctValues_ = numDistinctValues_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (boolStatsBuilder_ == null) { + result.boolStats_ = boolStats_; + } else { + result.boolStats_ = boolStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (longStatsBuilder_ == null) { + result.longStats_ = longStats_; + } else { + result.longStats_ = longStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + if (doubleStatsBuilder_ == null) { + result.doubleStats_ = doubleStats_; + } else { + result.doubleStats_ = doubleStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + if (stringStatsBuilder_ == null) { + result.stringStats_ = stringStats_; + } else { + result.stringStats_ = stringStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000100; + } + if (binaryStatsBuilder_ == null) { + result.binaryStats_ = binaryStats_; + } else { + result.binaryStats_ = binaryStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000200; + } + if (decimalStatsBuilder_ == null) { + result.decimalStats_ = decimalStats_; + } else { + result.decimalStats_ = decimalStatsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000400; + } + result.columnName_ = columnName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance()) return this; + if (other.hasLastAnalyzed()) { + setLastAnalyzed(other.getLastAnalyzed()); + } + if (other.hasColumnType()) { + bitField0_ |= 0x00000002; + columnType_ = other.columnType_; + onChanged(); + } + if (other.hasNumNulls()) { + setNumNulls(other.getNumNulls()); + } + if (other.hasNumDistinctValues()) { + setNumDistinctValues(other.getNumDistinctValues()); + } + if 
(other.hasBoolStats()) { + mergeBoolStats(other.getBoolStats()); + } + if (other.hasLongStats()) { + mergeLongStats(other.getLongStats()); + } + if (other.hasDoubleStats()) { + mergeDoubleStats(other.getDoubleStats()); + } + if (other.hasStringStats()) { + mergeStringStats(other.getStringStats()); + } + if (other.hasBinaryStats()) { + mergeBinaryStats(other.getBinaryStats()); + } + if (other.hasDecimalStats()) { + mergeDecimalStats(other.getDecimalStats()); + } + if (other.hasColumnName()) { + bitField0_ |= 0x00000400; + columnName_ = other.columnName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasColumnType()) { + + return false; + } + if (hasDecimalStats()) { + if (!getDecimalStats().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 last_analyzed = 1; + private long lastAnalyzed_ ; + /** + * optional int64 last_analyzed = 1; + */ + public boolean hasLastAnalyzed() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 last_analyzed = 1; + */ + public long getLastAnalyzed() { + return lastAnalyzed_; + } + /** + * optional int64 last_analyzed = 1; + */ + public Builder setLastAnalyzed(long value) { + bitField0_ |= 0x00000001; + lastAnalyzed_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_analyzed = 1; + */ + public Builder clearLastAnalyzed() { + bitField0_ = (bitField0_ & ~0x00000001); + lastAnalyzed_ = 0L; + onChanged(); + return this; + } + + // required string column_type = 2; + private java.lang.Object columnType_ = ""; + /** + * required string column_type = 2; + */ + public boolean hasColumnType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string column_type = 2; + */ + public java.lang.String getColumnType() { + java.lang.Object ref = columnType_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string column_type = 2; + */ + public com.google.protobuf.ByteString + getColumnTypeBytes() { + java.lang.Object ref = columnType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string column_type = 2; + */ + public Builder setColumnType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + columnType_ = value; + onChanged(); + return this; + } + /** + * required string column_type = 2; + */ + public Builder clearColumnType() { + bitField0_ = (bitField0_ & ~0x00000002); + 
columnType_ = getDefaultInstance().getColumnType(); + onChanged(); + return this; + } + /** + * required string column_type = 2; + */ + public Builder setColumnTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + columnType_ = value; + onChanged(); + return this; + } + + // optional int64 num_nulls = 3; + private long numNulls_ ; + /** + * optional int64 num_nulls = 3; + */ + public boolean hasNumNulls() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 num_nulls = 3; + */ + public long getNumNulls() { + return numNulls_; + } + /** + * optional int64 num_nulls = 3; + */ + public Builder setNumNulls(long value) { + bitField0_ |= 0x00000004; + numNulls_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_nulls = 3; + */ + public Builder clearNumNulls() { + bitField0_ = (bitField0_ & ~0x00000004); + numNulls_ = 0L; + onChanged(); + return this; + } + + // optional int64 num_distinct_values = 4; + private long numDistinctValues_ ; + /** + * optional int64 num_distinct_values = 4; + */ + public boolean hasNumDistinctValues() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 num_distinct_values = 4; + */ + public long getNumDistinctValues() { + return numDistinctValues_; + } + /** + * optional int64 num_distinct_values = 4; + */ + public Builder setNumDistinctValues(long value) { + bitField0_ |= 0x00000008; + numDistinctValues_ = value; + onChanged(); + return this; + } + /** + * optional int64 num_distinct_values = 4; + */ + public Builder clearNumDistinctValues() { + bitField0_ = (bitField0_ & ~0x00000008); + numDistinctValues_ = 0L; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> boolStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public boolean hasBoolStats() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats getBoolStats() { + if (boolStatsBuilder_ == null) { + return boolStats_; + } else { + return boolStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder setBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { + if (boolStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + boolStats_ = value; + onChanged(); + } else { + boolStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder setBoolStats( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder builderForValue) { + if (boolStatsBuilder_ == null) { + boolStats_ = builderForValue.build(); + onChanged(); + } else { + boolStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder mergeBoolStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats value) { + if (boolStatsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + boolStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance()) { + boolStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder(boolStats_).mergeFrom(value).buildPartial(); + } else { + boolStats_ = value; + } + onChanged(); + } else { + boolStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public Builder clearBoolStats() { + if (boolStatsBuilder_ == null) { + boolStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.getDefaultInstance(); + onChanged(); + } else { + boolStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder getBoolStatsBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getBoolStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder getBoolStatsOrBuilder() { + if (boolStatsBuilder_ != null) { + return boolStatsBuilder_.getMessageOrBuilder(); + } else { + return boolStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.BooleanStats bool_stats = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder> + getBoolStatsFieldBuilder() { + if (boolStatsBuilder_ == null) { + boolStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.BooleanStatsOrBuilder>( + boolStats_, + getParentForChildren(), + isClean()); + boolStats_ = null; + } + return boolStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> longStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public boolean hasLongStats() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats getLongStats() { + if (longStatsBuilder_ == null) { + return longStats_; + } else { + return longStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder setLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { + if (longStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + longStats_ = value; + onChanged(); + } else { + longStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder setLongStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder builderForValue) { + if (longStatsBuilder_ == null) { + longStats_ = builderForValue.build(); + onChanged(); + } else { + longStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder mergeLongStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats value) { + if (longStatsBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + longStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance()) { + longStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.newBuilder(longStats_).mergeFrom(value).buildPartial(); + } else { + longStats_ = value; + } + onChanged(); + } else { + longStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public Builder clearLongStats() { + if (longStatsBuilder_ == null) { + longStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.getDefaultInstance(); + onChanged(); + } else { + longStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder getLongStatsBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getLongStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder getLongStatsOrBuilder() { + if (longStatsBuilder_ != null) { + return longStatsBuilder_.getMessageOrBuilder(); + } else { + return longStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.LongStats long_stats = 6; + */ + 
private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder> + getLongStatsFieldBuilder() { + if (longStatsBuilder_ == null) { + longStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.LongStatsOrBuilder>( + longStats_, + getParentForChildren(), + isClean()); + longStats_ = null; + } + return longStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> doubleStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public boolean hasDoubleStats() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats getDoubleStats() { + if (doubleStatsBuilder_ == null) { + return doubleStats_; + } else { + return doubleStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder setDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { + if (doubleStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + doubleStats_ = value; + onChanged(); + } else { + doubleStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder setDoubleStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder builderForValue) { + if (doubleStatsBuilder_ == null) { + doubleStats_ = builderForValue.build(); + onChanged(); + } else { + doubleStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder mergeDoubleStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats value) { + if (doubleStatsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040) && + doubleStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance()) { + doubleStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder(doubleStats_).mergeFrom(value).buildPartial(); 
+ } else { + doubleStats_ = value; + } + onChanged(); + } else { + doubleStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000040; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public Builder clearDoubleStats() { + if (doubleStatsBuilder_ == null) { + doubleStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.getDefaultInstance(); + onChanged(); + } else { + doubleStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder getDoubleStatsBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getDoubleStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder getDoubleStatsOrBuilder() { + if (doubleStatsBuilder_ != null) { + return doubleStatsBuilder_.getMessageOrBuilder(); + } else { + return doubleStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DoubleStats double_stats = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder> + getDoubleStatsFieldBuilder() { + if (doubleStatsBuilder_ == null) { + doubleStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DoubleStatsOrBuilder>( + doubleStats_, + getParentForChildren(), + isClean()); + doubleStats_ = null; + } + return doubleStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> stringStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public boolean hasStringStats() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getStringStats() { + if (stringStatsBuilder_ == null) { + return stringStats_; + } else { + return stringStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder 
setStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (stringStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stringStats_ = value; + onChanged(); + } else { + stringStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder setStringStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { + if (stringStatsBuilder_ == null) { + stringStats_ = builderForValue.build(); + onChanged(); + } else { + stringStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder mergeStringStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (stringStatsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080) && + stringStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { + stringStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(stringStats_).mergeFrom(value).buildPartial(); + } else { + stringStats_ = value; + } + onChanged(); + } else { + stringStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000080; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public Builder clearStringStats() { + if (stringStatsBuilder_ == null) { + stringStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + onChanged(); + } else { + stringStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getStringStatsBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getStringStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getStringStatsOrBuilder() { + if (stringStatsBuilder_ != null) { + return stringStatsBuilder_.getMessageOrBuilder(); + } else { + return stringStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats string_stats = 8; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> + getStringStatsFieldBuilder() { + if (stringStatsBuilder_ == null) { + stringStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( + stringStats_, + 
getParentForChildren(), + isClean()); + stringStats_ = null; + } + return stringStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> binaryStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public boolean hasBinaryStats() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats getBinaryStats() { + if (binaryStatsBuilder_ == null) { + return binaryStats_; + } else { + return binaryStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder setBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (binaryStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + binaryStats_ = value; + onChanged(); + } else { + binaryStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder setBinaryStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder builderForValue) { + if (binaryStatsBuilder_ == null) { + binaryStats_ = builderForValue.build(); + onChanged(); + } else { + binaryStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder mergeBinaryStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats value) { + if (binaryStatsBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100) && + binaryStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance()) { + binaryStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.newBuilder(binaryStats_).mergeFrom(value).buildPartial(); + } else { + binaryStats_ = value; + } + onChanged(); + } else { + binaryStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public Builder clearBinaryStats() { + if (binaryStatsBuilder_ == null) { + binaryStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.getDefaultInstance(); + onChanged(); + } else { + binaryStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder getBinaryStatsBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getBinaryStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder getBinaryStatsOrBuilder() { + if (binaryStatsBuilder_ != null) { + return binaryStatsBuilder_.getMessageOrBuilder(); + } else { + return binaryStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.StringStats binary_stats = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder> + getBinaryStatsFieldBuilder() { + if (binaryStatsBuilder_ == null) { + binaryStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.StringStatsOrBuilder>( + binaryStats_, + getParentForChildren(), + isClean()); + binaryStats_ = null; + } + return binaryStatsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> decimalStatsBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public boolean hasDecimalStats() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats getDecimalStats() { + if (decimalStatsBuilder_ == null) { + return decimalStats_; + } else { + return decimalStatsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder setDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { + if (decimalStatsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + decimalStats_ = value; + onChanged(); + } else { + decimalStatsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder setDecimalStats( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder builderForValue) { + if (decimalStatsBuilder_ == null) { + decimalStats_ = 
builderForValue.build(); + onChanged(); + } else { + decimalStatsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder mergeDecimalStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats value) { + if (decimalStatsBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + decimalStats_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance()) { + decimalStats_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder(decimalStats_).mergeFrom(value).buildPartial(); + } else { + decimalStats_ = value; + } + onChanged(); + } else { + decimalStatsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public Builder clearDecimalStats() { + if (decimalStatsBuilder_ == null) { + decimalStats_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.getDefaultInstance(); + onChanged(); + } else { + decimalStatsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder getDecimalStatsBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getDecimalStatsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder getDecimalStatsOrBuilder() { + if (decimalStatsBuilder_ != null) { + return decimalStatsBuilder_.getMessageOrBuilder(); + } else { + return decimalStats_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ColumnStats.DecimalStats decimal_stats = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder> + getDecimalStatsFieldBuilder() { + if (decimalStatsBuilder_ == null) { + decimalStatsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.DecimalStatsOrBuilder>( + decimalStats_, + getParentForChildren(), + isClean()); + decimalStats_ = null; + } + return decimalStatsBuilder_; + } + + // optional string column_name = 11; + private java.lang.Object columnName_ = ""; + /** + * optional string column_name = 11; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string column_name = 11; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnName_ = s; + 
return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string column_name = 11; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string column_name = 11; + */ + public Builder setColumnName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + columnName_ = value; + onChanged(); + return this; + } + /** + * optional string column_name = 11; + */ + public Builder clearColumnName() { + bitField0_ = (bitField0_ & ~0x00000400); + columnName_ = getDefaultInstance().getColumnName(); + onChanged(); + return this; + } + /** + * optional string column_name = 11; + */ + public Builder setColumnNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + columnName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) + } + + static { + defaultInstance = new ColumnStats(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ColumnStats) + } + + public interface DatabaseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string description = 1; + /** + * optional string description = 1; + */ + boolean hasDescription(); + /** + * optional string description = 1; + */ + java.lang.String getDescription(); + /** + * optional string description = 1; + */ + com.google.protobuf.ByteString + getDescriptionBytes(); + + // optional string uri = 2; + /** + * optional string uri = 2; + */ + boolean hasUri(); + /** + * optional string uri = 2; + */ + java.lang.String getUri(); + /** + * optional string uri = 2; + */ + com.google.protobuf.ByteString + getUriBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + boolean hasPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); + + // optional string owner_name = 5; + /** + * optional string owner_name = 5; + */ + boolean hasOwnerName(); + /** + * optional string owner_name = 5; + */ + java.lang.String 
getOwnerName(); + /** + * optional string owner_name = 5; + */ + com.google.protobuf.ByteString + getOwnerNameBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + boolean hasOwnerType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} + */ + public static final class Database extends + com.google.protobuf.GeneratedMessage + implements DatabaseOrBuilder { + // Use Database.newBuilder() to construct. + private Database(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Database(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Database defaultInstance; + public static Database getDefaultInstance() { + return defaultInstance; + } + + public Database getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Database( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + description_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + uri_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = privileges_.toBuilder(); + } + privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(privileges_); + privileges_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 42: { + bitField0_ |= 0x00000010; + ownerName_ = input.readBytes(); + break; + } + case 48: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + 
unknownFields.mergeVarintField(6, rawValue); + } else { + bitField0_ |= 0x00000020; + ownerType_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Database parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Database(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string description = 1; + public static final int DESCRIPTION_FIELD_NUMBER = 1; + private java.lang.Object description_; + /** + * optional string description = 1; + */ + public boolean hasDescription() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string description = 1; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + description_ = s; + } + return s; + } + } + /** + * optional string description = 1; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string uri = 2; + public static final int URI_FIELD_NUMBER = 2; + private java.lang.Object uri_; + /** + * optional string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + uri_ = s; + } + return s; + } + } + /** + * optional string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + public static final int PARAMETERS_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + public static final int PRIVILEGES_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + return privileges_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + return privileges_; + } + + // optional string owner_name = 5; + public static final int OWNER_NAME_FIELD_NUMBER = 5; + private java.lang.Object ownerName_; + /** + * optional string owner_name = 5; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner_name = 5; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ownerName_ = s; + } + return s; + } + } + /** + * optional string owner_name = 5; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + public static final int OWNER_TYPE_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType 
owner_type = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + + private void initFields() { + description_ = ""; + uri_ = ""; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + ownerName_ = ""; + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getDescriptionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUriBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, parameters_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, privileges_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeEnum(6, ownerType_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getDescriptionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUriBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, parameters_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, privileges_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, ownerType_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Database} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DatabaseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParametersFieldBuilder(); + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + description_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + uri_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + ownerName_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.description_ = description_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.uri_ = uri_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (privilegesBuilder_ == null) { + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.ownerName_ = ownerName_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + 
result.ownerType_ = ownerType_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database.getDefaultInstance()) return this; + if (other.hasDescription()) { + bitField0_ |= 0x00000001; + description_ = other.description_; + onChanged(); + } + if (other.hasUri()) { + bitField0_ |= 0x00000002; + uri_ = other.uri_; + onChanged(); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + if (other.hasPrivileges()) { + mergePrivileges(other.getPrivileges()); + } + if (other.hasOwnerName()) { + bitField0_ |= 0x00000010; + ownerName_ = other.ownerName_; + onChanged(); + } + if (other.hasOwnerType()) { + setOwnerType(other.getOwnerType()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Database) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string description = 1; + private java.lang.Object description_ = ""; + /** + * optional string description = 1; + */ + public boolean hasDescription() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string description = 1; + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string description = 1; + */ + public com.google.protobuf.ByteString + getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string description = 1; + */ + public Builder setDescription( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + description_ = value; + onChanged(); + return this; + } + /** + * optional string description = 1; + */ + public Builder clearDescription() { + bitField0_ = (bitField0_ & ~0x00000001); + description_ = 
getDefaultInstance().getDescription(); + onChanged(); + return this; + } + /** + * optional string description = 1; + */ + public Builder setDescriptionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + description_ = value; + onChanged(); + return this; + } + + // optional string uri = 2; + private java.lang.Object uri_ = ""; + /** + * optional string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + uri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string uri = 2; + */ + public Builder setUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + /** + * optional string uri = 2; + */ + public Builder clearUri() { + bitField0_ = (bitField0_ & ~0x00000002); + uri_ = getDefaultInstance().getUri(); + onChanged(); + return this; + } + /** + * optional string uri = 2; + */ + public Builder setUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if (privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // optional string owner_name = 5; + private java.lang.Object ownerName_ = ""; + /** + * optional string owner_name = 5; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner_name = 5; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ownerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner_name = 5; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner_name = 5; + */ + public Builder setOwnerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + ownerName_ = value; + onChanged(); + return this; + } + /** + * optional string owner_name = 5; + */ + public Builder clearOwnerName() { + bitField0_ = (bitField0_ & ~0x00000010); + ownerName_ = getDefaultInstance().getOwnerName(); + onChanged(); + return this; + } + /** + * optional string owner_name = 5; + */ + public Builder setOwnerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + ownerName_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public Builder setOwnerType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + ownerType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 6; + */ + public Builder clearOwnerType() { + bitField0_ = (bitField0_ & 
~0x00000020); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Database) + } + + static { + defaultInstance = new Database(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Database) + } + + public interface DelegationTokenOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string token_str = 1; + /** + * required string token_str = 1; + */ + boolean hasTokenStr(); + /** + * required string token_str = 1; + */ + java.lang.String getTokenStr(); + /** + * required string token_str = 1; + */ + com.google.protobuf.ByteString + getTokenStrBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.DelegationToken} + */ + public static final class DelegationToken extends + com.google.protobuf.GeneratedMessage + implements DelegationTokenOrBuilder { + // Use DelegationToken.newBuilder() to construct. + private DelegationToken(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DelegationToken(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DelegationToken defaultInstance; + public static DelegationToken getDefaultInstance() { + return defaultInstance; + } + + public DelegationToken getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DelegationToken( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tokenStr_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DelegationToken parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DelegationToken(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string token_str = 1; + public static final int TOKEN_STR_FIELD_NUMBER = 1; + private java.lang.Object tokenStr_; + /** + * required string token_str = 1; + */ + public boolean hasTokenStr() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string token_str = 1; + */ + public java.lang.String getTokenStr() { + java.lang.Object ref = tokenStr_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tokenStr_ = s; + } + return s; + } + } + /** + * required string token_str = 1; + */ + public com.google.protobuf.ByteString + getTokenStrBytes() { + java.lang.Object ref = tokenStr_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tokenStr_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + tokenStr_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTokenStr()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTokenStrBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTokenStrBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.DelegationToken} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationTokenOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.Builder.class); + } + + // Construct using 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tokenStr_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tokenStr_ = tokenStr_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken.getDefaultInstance()) return this; + if (other.hasTokenStr()) { + bitField0_ |= 0x00000001; + tokenStr_ = other.tokenStr_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTokenStr()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.DelegationToken) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string token_str = 1; + 
private java.lang.Object tokenStr_ = ""; + /** + * required string token_str = 1; + */ + public boolean hasTokenStr() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string token_str = 1; + */ + public java.lang.String getTokenStr() { + java.lang.Object ref = tokenStr_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tokenStr_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string token_str = 1; + */ + public com.google.protobuf.ByteString + getTokenStrBytes() { + java.lang.Object ref = tokenStr_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tokenStr_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string token_str = 1; + */ + public Builder setTokenStr( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tokenStr_ = value; + onChanged(); + return this; + } + /** + * required string token_str = 1; + */ + public Builder clearTokenStr() { + bitField0_ = (bitField0_ & ~0x00000001); + tokenStr_ = getDefaultInstance().getTokenStr(); + onChanged(); + return this; + } + /** + * required string token_str = 1; + */ + public Builder setTokenStrBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tokenStr_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.DelegationToken) + } + + static { + defaultInstance = new DelegationToken(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.DelegationToken) + } + + public interface FieldSchemaOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // required string type = 2; + /** + * required string type = 2; + */ + boolean hasType(); + /** + * required string type = 2; + */ + java.lang.String getType(); + /** + * required string type = 2; + */ + com.google.protobuf.ByteString + getTypeBytes(); + + // optional string comment = 3; + /** + * optional string comment = 3; + */ + boolean hasComment(); + /** + * optional string comment = 3; + */ + java.lang.String getComment(); + /** + * optional string comment = 3; + */ + com.google.protobuf.ByteString + getCommentBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} + */ + public static final class FieldSchema extends + com.google.protobuf.GeneratedMessage + implements FieldSchemaOrBuilder { + // Use FieldSchema.newBuilder() to construct. 
+ private FieldSchema(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FieldSchema(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FieldSchema defaultInstance; + public static FieldSchema getDefaultInstance() { + return defaultInstance; + } + + public FieldSchema getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FieldSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + type_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + comment_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FieldSchema(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + 
com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private java.lang.Object type_; + /** + * required string type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string type = 2; + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + type_ = s; + } + return s; + } + } + /** + * required string type = 2; + */ + public com.google.protobuf.ByteString + getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string comment = 3; + public static final int COMMENT_FIELD_NUMBER = 3; + private java.lang.Object comment_; + /** + * optional string comment = 3; + */ + public boolean hasComment() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string comment = 3; + */ + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + comment_ = s; + } + return s; + } + } + /** + * optional string comment = 3; + */ + public com.google.protobuf.ByteString + getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + type_ = ""; + comment_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getCommentBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int 
getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTypeBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getCommentBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.FieldSchema} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + comment_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + 
to_bitField0_ |= 0x00000004; + } + result.comment_ = comment_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasType()) { + bitField0_ |= 0x00000002; + type_ = other.type_; + onChanged(); + } + if (other.hasComment()) { + bitField0_ |= 0x00000004; + comment_ = other.comment_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasType()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // required string type = 2; + private java.lang.Object type_ = ""; + /** + * required string type = 2; + */ + public boolean 
hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string type = 2; + */ + public java.lang.String getType() { + java.lang.Object ref = type_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + type_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string type = 2; + */ + public com.google.protobuf.ByteString + getTypeBytes() { + java.lang.Object ref = type_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + type_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string type = 2; + */ + public Builder setType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required string type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = getDefaultInstance().getType(); + onChanged(); + return this; + } + /** + * required string type = 2; + */ + public Builder setTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + + // optional string comment = 3; + private java.lang.Object comment_ = ""; + /** + * optional string comment = 3; + */ + public boolean hasComment() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string comment = 3; + */ + public java.lang.String getComment() { + java.lang.Object ref = comment_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + comment_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string comment = 3; + */ + public com.google.protobuf.ByteString + getCommentBytes() { + java.lang.Object ref = comment_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + comment_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string comment = 3; + */ + public Builder setComment( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + comment_ = value; + onChanged(); + return this; + } + /** + * optional string comment = 3; + */ + public Builder clearComment() { + bitField0_ = (bitField0_ & ~0x00000004); + comment_ = getDefaultInstance().getComment(); + onChanged(); + return this; + } + /** + * optional string comment = 3; + */ + public Builder setCommentBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + comment_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) + } + + static { + defaultInstance = new FieldSchema(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.FieldSchema) + } + + public interface FunctionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string class_name = 1; + /** + * optional string class_name = 1; + */ + boolean hasClassName(); + 
/** + * optional string class_name = 1; + */ + java.lang.String getClassName(); + /** + * optional string class_name = 1; + */ + com.google.protobuf.ByteString + getClassNameBytes(); + + // optional string owner_name = 2; + /** + * optional string owner_name = 2; + */ + boolean hasOwnerName(); + /** + * optional string owner_name = 2; + */ + java.lang.String getOwnerName(); + /** + * optional string owner_name = 2; + */ + com.google.protobuf.ByteString + getOwnerNameBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + boolean hasOwnerType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType(); + + // optional sint64 create_time = 4; + /** + * optional sint64 create_time = 4; + */ + boolean hasCreateTime(); + /** + * optional sint64 create_time = 4; + */ + long getCreateTime(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + boolean hasFunctionType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType getFunctionType(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + java.util.List + getResourceUrisList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + int getResourceUrisCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + java.util.List + getResourceUrisOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function} + */ + public static final class Function extends + com.google.protobuf.GeneratedMessage + implements FunctionOrBuilder { + // Use Function.newBuilder() to construct. 
+ private Function(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Function(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Function defaultInstance; + public static Function getDefaultInstance() { + return defaultInstance; + } + + public Function getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Function( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + className_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + ownerName_ = input.readBytes(); + break; + } + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + ownerType_ = value; + } + break; + } + case 32: { + bitField0_ |= 0x00000008; + createTime_ = input.readSInt64(); + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + functionType_ = value; + } + break; + } + case 50: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + resourceUris_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + resourceUris_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + resourceUris_ = java.util.Collections.unmodifiableList(resourceUris_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Function parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Function(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.Function.FunctionType} + */ + public enum FunctionType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * JAVA = 1; + */ + JAVA(0, 1), + ; + + /** + * JAVA = 1; + */ + public static final int JAVA_VALUE = 1; + + + public final int getNumber() { return value; } + + public static FunctionType valueOf(int value) { + switch (value) { + case 1: return JAVA; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FunctionType findValueByNumber(int number) { + return FunctionType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDescriptor().getEnumTypes().get(0); + } + + private static final FunctionType[] VALUES = values(); + + public static FunctionType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FunctionType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.Function.FunctionType) + } + + public interface ResourceUriOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + boolean hasResourceType(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType(); + + // required string uri = 2; + /** + * required string uri = 2; + */ + boolean hasUri(); + /** + * required string uri = 2; + */ + java.lang.String getUri(); + /** + * required string uri = 2; + */ + com.google.protobuf.ByteString + getUriBytes(); + } + /** + * 
Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri} + */ + public static final class ResourceUri extends + com.google.protobuf.GeneratedMessage + implements ResourceUriOrBuilder { + // Use ResourceUri.newBuilder() to construct. + private ResourceUri(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ResourceUri(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ResourceUri defaultInstance; + public static ResourceUri getDefaultInstance() { + return defaultInstance; + } + + public ResourceUri getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ResourceUri( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + resourceType_ = value; + } + break; + } + case 18: { + bitField0_ |= 0x00000002; + uri_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ResourceUri parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ResourceUri(input, extensionRegistry); + } + }; + + @java.lang.Override + public 
com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType} + */ + public enum ResourceType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * JAR = 1; + */ + JAR(0, 1), + /** + * FILE = 2; + */ + FILE(1, 2), + /** + * ARCHIVE = 3; + */ + ARCHIVE(2, 3), + ; + + /** + * JAR = 1; + */ + public static final int JAR_VALUE = 1; + /** + * FILE = 2; + */ + public static final int FILE_VALUE = 2; + /** + * ARCHIVE = 3; + */ + public static final int ARCHIVE_VALUE = 3; + + + public final int getNumber() { return value; } + + public static ResourceType valueOf(int value) { + switch (value) { + case 1: return JAR; + case 2: return FILE; + case 3: return ARCHIVE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ResourceType findValueByNumber(int number) { + return ResourceType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDescriptor().getEnumTypes().get(0); + } + + private static final ResourceType[] VALUES = values(); + + public static ResourceType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ResourceType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType) + } + + private int bitField0_; + // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + public static final int RESOURCE_TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType resourceType_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public boolean hasResourceType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType() { + return resourceType_; + } + + // required string uri = 2; + public static final int URI_FIELD_NUMBER = 2; + private java.lang.Object uri_; + /** + * required string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + uri_ = s; + } + return s; + } + } + /** + * required string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; + uri_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasResourceType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasUri()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, resourceType_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUriBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, resourceType_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUriBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + resourceType_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; + bitField0_ = (bitField0_ & ~0x00000001); + uri_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.resourceType_ = resourceType_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.uri_ = uri_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()) return this; + if (other.hasResourceType()) { + setResourceType(other.getResourceType()); + } + if (other.hasUri()) { + bitField0_ |= 0x00000002; + uri_ = other.uri_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasResourceType()) { + + return false; + } + if (!hasUri()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + private 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public boolean hasResourceType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType getResourceType() { + return resourceType_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public Builder setResourceType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + resourceType_ = value; + onChanged(); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri.ResourceType resource_type = 1; + */ + public Builder clearResourceType() { + bitField0_ = (bitField0_ & ~0x00000001); + resourceType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; + onChanged(); + return this; + } + + // required string uri = 2; + private java.lang.Object uri_ = ""; + /** + * required string uri = 2; + */ + public boolean hasUri() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string uri = 2; + */ + public java.lang.String getUri() { + java.lang.Object ref = uri_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + uri_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string uri = 2; + */ + public com.google.protobuf.ByteString + getUriBytes() { + java.lang.Object ref = uri_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + uri_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string uri = 2; + */ + public Builder setUri( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + /** + * required string uri = 2; + */ + public Builder clearUri() { + bitField0_ = (bitField0_ & ~0x00000002); + uri_ = getDefaultInstance().getUri(); + onChanged(); + return this; + } + /** + * required string uri = 2; + */ + public Builder setUriBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + uri_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri) + } + + static { + defaultInstance = new ResourceUri(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri) + } + + private int bitField0_; + // optional string class_name = 1; + public static final int CLASS_NAME_FIELD_NUMBER = 1; + private java.lang.Object className_; + /** + * optional string class_name = 1; + */ + public boolean hasClassName() { + return ((bitField0_ & 
0x00000001) == 0x00000001); + } + /** + * optional string class_name = 1; + */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + className_ = s; + } + return s; + } + } + /** + * optional string class_name = 1; + */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string owner_name = 2; + public static final int OWNER_NAME_FIELD_NUMBER = 2; + private java.lang.Object ownerName_; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ownerName_ = s; + } + return s; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + public static final int OWNER_TYPE_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + + // optional sint64 create_time = 4; + public static final int CREATE_TIME_FIELD_NUMBER = 4; + private long createTime_; + /** + * optional sint64 create_time = 4; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional sint64 create_time = 4; + */ + public long getCreateTime() { + return createTime_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + public static final int FUNCTION_TYPE_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType functionType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public boolean hasFunctionType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType 
getFunctionType() { + return functionType_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + public static final int RESOURCE_URIS_FIELD_NUMBER = 6; + private java.util.List resourceUris_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public java.util.List getResourceUrisList() { + return resourceUris_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public java.util.List + getResourceUrisOrBuilderList() { + return resourceUris_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public int getResourceUrisCount() { + return resourceUris_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index) { + return resourceUris_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( + int index) { + return resourceUris_.get(index); + } + + private void initFields() { + className_ = ""; + ownerName_ = ""; + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + createTime_ = 0L; + functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; + resourceUris_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getResourceUrisCount(); i++) { + if (!getResourceUris(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, ownerType_.getNumber()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeSInt64(4, createTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, functionType_.getNumber()); + } + for (int i = 0; i < resourceUris_.size(); i++) { + output.writeMessage(6, resourceUris_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getOwnerNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, ownerType_.getNumber()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + 
.computeSInt64Size(4, createTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, functionType_.getNumber()); + } + for (int i = 0; i < resourceUris_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, resourceUris_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder 
newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Function} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FunctionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getResourceUrisFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + className_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + ownerName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000004); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; + bitField0_ = (bitField0_ & ~0x00000010); + if (resourceUrisBuilder_ == null) { + resourceUris_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + resourceUrisBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ 
|= 0x00000001; + } + result.className_ = className_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ownerName_ = ownerName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.ownerType_ = ownerType_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.functionType_ = functionType_; + if (resourceUrisBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + resourceUris_ = java.util.Collections.unmodifiableList(resourceUris_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.resourceUris_ = resourceUris_; + } else { + result.resourceUris_ = resourceUrisBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.getDefaultInstance()) return this; + if (other.hasClassName()) { + bitField0_ |= 0x00000001; + className_ = other.className_; + onChanged(); + } + if (other.hasOwnerName()) { + bitField0_ |= 0x00000002; + ownerName_ = other.ownerName_; + onChanged(); + } + if (other.hasOwnerType()) { + setOwnerType(other.getOwnerType()); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasFunctionType()) { + setFunctionType(other.getFunctionType()); + } + if (resourceUrisBuilder_ == null) { + if (!other.resourceUris_.isEmpty()) { + if (resourceUris_.isEmpty()) { + resourceUris_ = other.resourceUris_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureResourceUrisIsMutable(); + resourceUris_.addAll(other.resourceUris_); + } + onChanged(); + } + } else { + if (!other.resourceUris_.isEmpty()) { + if (resourceUrisBuilder_.isEmpty()) { + resourceUrisBuilder_.dispose(); + resourceUrisBuilder_ = null; + resourceUris_ = other.resourceUris_; + bitField0_ = (bitField0_ & ~0x00000020); + resourceUrisBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getResourceUrisFieldBuilder() : null; + } else { + resourceUrisBuilder_.addAllMessages(other.resourceUris_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getResourceUrisCount(); i++) { + if (!getResourceUris(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string class_name = 1; + private java.lang.Object className_ = ""; + /** + * optional string class_name = 1; + */ + public boolean hasClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string class_name = 1; + */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + className_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string class_name = 1; + */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string class_name = 1; + */ + public Builder setClassName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + /** + * optional string class_name = 1; + */ + public Builder clearClassName() { + bitField0_ = (bitField0_ & ~0x00000001); + className_ = getDefaultInstance().getClassName(); + onChanged(); + return this; + } + /** + * optional string class_name = 1; + */ + public Builder setClassNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + + // optional string owner_name = 2; + private java.lang.Object ownerName_ = ""; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ownerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) 
ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder clearOwnerName() { + bitField0_ = (bitField0_ & ~0x00000002); + ownerName_ = getDefaultInstance().getOwnerName(); + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public boolean hasOwnerType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getOwnerType() { + return ownerType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public Builder setOwnerType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + ownerType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType owner_type = 3; + */ + public Builder clearOwnerType() { + bitField0_ = (bitField0_ & ~0x00000004); + ownerType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional sint64 create_time = 4; + private long createTime_ ; + /** + * optional sint64 create_time = 4; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional sint64 create_time = 4; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional sint64 create_time = 4; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000008; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional sint64 create_time = 4; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000008); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public boolean hasFunctionType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType getFunctionType() { + return 
functionType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public Builder setFunctionType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + functionType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Function.FunctionType function_type = 5; + */ + public Builder clearFunctionType() { + bitField0_ = (bitField0_ & ~0x00000010); + functionType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.FunctionType.JAVA; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + private java.util.List resourceUris_ = + java.util.Collections.emptyList(); + private void ensureResourceUrisIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + resourceUris_ = new java.util.ArrayList(resourceUris_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder> resourceUrisBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public java.util.List getResourceUrisList() { + if (resourceUrisBuilder_ == null) { + return java.util.Collections.unmodifiableList(resourceUris_); + } else { + return resourceUrisBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public int getResourceUrisCount() { + if (resourceUrisBuilder_ == null) { + return resourceUris_.size(); + } else { + return resourceUrisBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri getResourceUris(int index) { + if (resourceUrisBuilder_ == null) { + return resourceUris_.get(index); + } else { + return resourceUrisBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder setResourceUris( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { + if (resourceUrisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResourceUrisIsMutable(); + resourceUris_.set(index, value); + onChanged(); + } else { + resourceUrisBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder setResourceUris( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { + if (resourceUrisBuilder_ == null) { + ensureResourceUrisIsMutable(); + resourceUris_.set(index, builderForValue.build()); + onChanged(); + } else { + resourceUrisBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder 
addResourceUris(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { + if (resourceUrisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResourceUrisIsMutable(); + resourceUris_.add(value); + onChanged(); + } else { + resourceUrisBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder addResourceUris( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri value) { + if (resourceUrisBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResourceUrisIsMutable(); + resourceUris_.add(index, value); + onChanged(); + } else { + resourceUrisBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder addResourceUris( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { + if (resourceUrisBuilder_ == null) { + ensureResourceUrisIsMutable(); + resourceUris_.add(builderForValue.build()); + onChanged(); + } else { + resourceUrisBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder addResourceUris( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder builderForValue) { + if (resourceUrisBuilder_ == null) { + ensureResourceUrisIsMutable(); + resourceUris_.add(index, builderForValue.build()); + onChanged(); + } else { + resourceUrisBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder addAllResourceUris( + java.lang.Iterable values) { + if (resourceUrisBuilder_ == null) { + ensureResourceUrisIsMutable(); + super.addAll(values, resourceUris_); + onChanged(); + } else { + resourceUrisBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder clearResourceUris() { + if (resourceUrisBuilder_ == null) { + resourceUris_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + resourceUrisBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public Builder removeResourceUris(int index) { + if (resourceUrisBuilder_ == null) { + ensureResourceUrisIsMutable(); + resourceUris_.remove(index); + onChanged(); + } else { + resourceUrisBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder getResourceUrisBuilder( + int index) { + return getResourceUrisFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder getResourceUrisOrBuilder( + int index) { + if (resourceUrisBuilder_ == null) { + return resourceUris_.get(index); } else { + return 
resourceUrisBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public java.util.List + getResourceUrisOrBuilderList() { + if (resourceUrisBuilder_ != null) { + return resourceUrisBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(resourceUris_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder addResourceUrisBuilder() { + return getResourceUrisFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder addResourceUrisBuilder( + int index) { + return getResourceUrisFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.Function.ResourceUri resource_uris = 6; + */ + public java.util.List + getResourceUrisBuilderList() { + return getResourceUrisFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder> + getResourceUrisFieldBuilder() { + if (resourceUrisBuilder_ == null) { + resourceUrisBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUri.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Function.ResourceUriOrBuilder>( + resourceUris_, + ((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + resourceUris_ = null; + } + return resourceUrisBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Function) + } + + static { + defaultInstance = new Function(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Function) + } + + public interface MasterKeyOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string master_key = 1; + /** + * required string master_key = 1; + */ + boolean hasMasterKey(); + /** + * required string master_key = 1; + */ + java.lang.String getMasterKey(); + /** + * required string master_key = 1; + */ + com.google.protobuf.ByteString + getMasterKeyBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.MasterKey} + */ + public static final class MasterKey extends + com.google.protobuf.GeneratedMessage + implements MasterKeyOrBuilder { + // Use MasterKey.newBuilder() to construct. 
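+ // Illustrative usage sketch (comment only, not generated code): the Builder and
+ // parse methods declared in this class would typically be used roughly as follows.
+ // toByteArray() comes from the protobuf MessageLite base class; the other calls
+ // (newBuilder, setMasterKey, build, parseFrom) are declared below.
+ //   HbaseMetastoreProto.MasterKey key = HbaseMetastoreProto.MasterKey.newBuilder()
+ //       .setMasterKey("example-master-key")   // required field; build() fails if unset
+ //       .build();
+ //   byte[] serialized = key.toByteArray();    // wire format produced by writeTo()
+ //   HbaseMetastoreProto.MasterKey copy =
+ //       HbaseMetastoreProto.MasterKey.parseFrom(serialized);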
+ private MasterKey(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MasterKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MasterKey defaultInstance; + public static MasterKey getDefaultInstance() { + return defaultInstance; + } + + public MasterKey getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MasterKey( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + masterKey_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MasterKey parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MasterKey(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string master_key = 1; + public static final int MASTER_KEY_FIELD_NUMBER = 1; + private java.lang.Object masterKey_; + /** + * required string master_key = 1; + */ + public boolean hasMasterKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string master_key = 1; + */ + public java.lang.String getMasterKey() { + java.lang.Object ref = masterKey_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + masterKey_ 
= s; + } + return s; + } + } + /** + * required string master_key = 1; + */ + public com.google.protobuf.ByteString + getMasterKeyBytes() { + java.lang.Object ref = masterKey_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + masterKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + masterKey_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasMasterKey()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getMasterKeyBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getMasterKeyBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseDelimitedFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.MasterKey} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKeyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + masterKey_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.masterKey_ = masterKey_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey.getDefaultInstance()) return this; + if (other.hasMasterKey()) { + bitField0_ |= 0x00000001; + masterKey_ = other.masterKey_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasMasterKey()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.MasterKey) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string master_key = 1; + private java.lang.Object masterKey_ = ""; + /** + * required string master_key = 1; + */ + public boolean hasMasterKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string master_key = 1; + */ + public java.lang.String getMasterKey() { + java.lang.Object ref = masterKey_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + masterKey_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string master_key = 1; + */ + public com.google.protobuf.ByteString + getMasterKeyBytes() { + java.lang.Object ref = masterKey_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + masterKey_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string master_key = 1; + */ + public Builder setMasterKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + masterKey_ = value; + onChanged(); + return this; + } + /** + * required string master_key = 1; + */ + public Builder clearMasterKey() { + bitField0_ = (bitField0_ & ~0x00000001); + masterKey_ = getDefaultInstance().getMasterKey(); + onChanged(); + return this; + } + /** + * required string master_key = 1; + */ + public Builder 
setMasterKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + masterKey_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.MasterKey) + } + + static { + defaultInstance = new MasterKey(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.MasterKey) + } + + public interface ParameterEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required string value = 2; + /** + * required string value = 2; + */ + boolean hasValue(); + /** + * required string value = 2; + */ + java.lang.String getValue(); + /** + * required string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} + */ + public static final class ParameterEntry extends + com.google.protobuf.GeneratedMessage + implements ParameterEntryOrBuilder { + // Use ParameterEntry.newBuilder() to construct. + private ParameterEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ParameterEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ParameterEntry defaultInstance; + public static ParameterEntry getDefaultInstance() { + return defaultInstance; + } + + public ParameterEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ParameterEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ParameterEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ParameterEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = ""; + value_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } 
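+      // Editorial note (added commentary, not generated output): bitField0_ packs the
+      // proto2 presence bits for this message; bit 0 (0x1) tracks 'key' and bit 1 (0x2)
+      // tracks 'value', so writeTo() only serializes fields whose presence bit is set.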
+ if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder 
newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ParameterEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + 
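+        // Editorial note (added commentary, not generated output): buildPartial() copies
+        // the builder's field values into a new immutable message and carries over the
+        // matching presence bits via to_bitField0_; required-field validation is left to
+        // build(), which throws if isInitialized() fails.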
result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // required string value = 2; + private java.lang.Object value_ = ""; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref 
= value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) + } + + static { + defaultInstance = new ParameterEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ParameterEntry) + } + + public interface ParametersOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + java.util.List + getParameterList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + int getParameterCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + java.util.List + getParameterOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} + */ + public static final class Parameters extends + com.google.protobuf.GeneratedMessage + implements ParametersOrBuilder { + // Use Parameters.newBuilder() to construct. 
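+    // Editorial note: a minimal usage sketch (illustrative only, not part of the
+    // generated file). A Parameters message is a repeated list of ParameterEntry
+    // key/value pairs built through the nested Builder API shown above:
+    //
+    //   HbaseMetastoreProto.Parameters params = HbaseMetastoreProto.Parameters.newBuilder()
+    //       .addParameter(HbaseMetastoreProto.ParameterEntry.newBuilder()
+    //           .setKey("transient_lastDdlTime")   // hypothetical key/value pair
+    //           .setValue("0")
+    //           .build())
+    //       .build();
+    //   byte[] bytes = params.toByteArray();                 // serialize
+    //   HbaseMetastoreProto.Parameters copy =
+    //       HbaseMetastoreProto.Parameters.parseFrom(bytes); // parse back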
+ private Parameters(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Parameters(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Parameters defaultInstance; + public static Parameters getDefaultInstance() { + return defaultInstance; + } + + public Parameters getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Parameters( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + parameter_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = java.util.Collections.unmodifiableList(parameter_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Parameters parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Parameters(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + public static final int PARAMETER_FIELD_NUMBER = 1; + private java.util.List parameter_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List 
getParameterList() { + return parameter_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List + getParameterOrBuilderList() { + return parameter_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public int getParameterCount() { + return parameter_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { + return parameter_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index) { + return parameter_.get(index); + } + + private void initFields() { + parameter_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getParameterCount(); i++) { + if (!getParameter(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < parameter_.size(); i++) { + output.writeMessage(1, parameter_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < parameter_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, parameter_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters 
parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Parameters} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParameterFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (parameterBuilder_ == null) { + parameter_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + parameterBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters(this); + int from_bitField0_ = bitField0_; + if (parameterBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = java.util.Collections.unmodifiableList(parameter_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.parameter_ = parameter_; + } else { + result.parameter_ = parameterBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) return this; + if (parameterBuilder_ == null) { + if (!other.parameter_.isEmpty()) { + if (parameter_.isEmpty()) { + parameter_ = other.parameter_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureParameterIsMutable(); + parameter_.addAll(other.parameter_); + } + onChanged(); + } + } else { + if (!other.parameter_.isEmpty()) { + if (parameterBuilder_.isEmpty()) { + parameterBuilder_.dispose(); + parameterBuilder_ = null; + parameter_ = other.parameter_; + bitField0_ = (bitField0_ & ~0x00000001); + parameterBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getParameterFieldBuilder() : null; + } else { + parameterBuilder_.addAllMessages(other.parameter_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getParameterCount(); i++) { + if (!getParameter(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + private java.util.List parameter_ = + java.util.Collections.emptyList(); + private void ensureParameterIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + parameter_ = new java.util.ArrayList(parameter_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> parameterBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List getParameterList() { + if (parameterBuilder_ == null) { + return java.util.Collections.unmodifiableList(parameter_); + } else { + return parameterBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public int getParameterCount() { + if (parameterBuilder_ == null) { + return parameter_.size(); + } else { + return parameterBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry getParameter(int index) { + if (parameterBuilder_ == null) { + return parameter_.get(index); + } else { + return parameterBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder setParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.set(index, value); + onChanged(); + } else { + parameterBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder setParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.set(index, builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.add(value); + onChanged(); + } else { + parameterBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry value) { + if (parameterBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParameterIsMutable(); + parameter_.add(index, value); + onChanged(); + } else { + parameterBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.add(builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addParameter( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder builderForValue) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.add(index, builderForValue.build()); + onChanged(); + } else { + parameterBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder addAllParameter( + java.lang.Iterable values) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + super.addAll(values, parameter_); + onChanged(); + } else { + parameterBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder clearParameter() { + if (parameterBuilder_ == null) { + parameter_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + parameterBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public Builder removeParameter(int index) { + if (parameterBuilder_ == null) { + ensureParameterIsMutable(); + parameter_.remove(index); + onChanged(); + } else { + parameterBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder getParameterBuilder( + int index) { + return getParameterFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder getParameterOrBuilder( + int index) { + if (parameterBuilder_ == null) { + return parameter_.get(index); } else { + return parameterBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + 
*/ + public java.util.List + getParameterOrBuilderList() { + if (parameterBuilder_ != null) { + return parameterBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(parameter_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder() { + return getParameterFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder addParameterBuilder( + int index) { + return getParameterFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.ParameterEntry parameter = 1; + */ + public java.util.List + getParameterBuilderList() { + return getParameterFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder> + getParameterFieldBuilder() { + if (parameterBuilder_ == null) { + parameterBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParameterEntryOrBuilder>( + parameter_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + parameter_ = null; + } + return parameterBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) + } + + static { + defaultInstance = new Parameters(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Parameters) + } + + public interface PartitionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 create_time = 1; + /** + * optional int64 create_time = 1; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 1; + */ + long getCreateTime(); + + // optional int64 last_access_time = 2; + /** + * optional int64 last_access_time = 2; + */ + boolean hasLastAccessTime(); + /** + * optional int64 last_access_time = 2; + */ + long getLastAccessTime(); + + // optional string location = 3; + /** + * optional string location = 3; + */ + boolean hasLocation(); + /** + * optional string location = 3; + */ + java.lang.String getLocation(); + /** + * optional string location = 3; + */ + com.google.protobuf.ByteString + getLocationBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + boolean hasSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); + + // required bytes sd_hash = 5; + /** + * required bytes sd_hash = 5; + */ + boolean hasSdHash(); + /** + * required bytes sd_hash = 5; + */ + com.google.protobuf.ByteString getSdHash(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} + */ + public static final class Partition extends + com.google.protobuf.GeneratedMessage + implements PartitionOrBuilder { + // Use Partition.newBuilder() to construct. + private Partition(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Partition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Partition defaultInstance; + public static Partition getDefaultInstance() { + return defaultInstance; + } + + public Partition getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Partition( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + createTime_ = input.readInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + lastAccessTime_ = input.readInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + location_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = sdParameters_.toBuilder(); + } + sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sdParameters_); + sdParameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 42: { + bitField0_ |= 0x00000010; + sdHash_ = input.readBytes(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Partition parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Partition(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 create_time = 1; + public static final int CREATE_TIME_FIELD_NUMBER = 1; + private long createTime_; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + + // optional int64 last_access_time = 2; + public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 2; + private long lastAccessTime_; + /** + * optional int64 last_access_time = 2; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 last_access_time = 2; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + + // optional string location = 3; + public static final int LOCATION_FIELD_NUMBER = 3; + private java.lang.Object location_; + /** + * optional string location = 3; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string location = 3; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + location_ = s; + } + return s; + } + } + /** + * optional string location = 3; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + public static final int SD_PARAMETERS_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + return sdParameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+     * storage descriptor parameters
+     * </pre>
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + return sdParameters_; + } + + // required bytes sd_hash = 5; + public static final int SD_HASH_FIELD_NUMBER = 5; + private com.google.protobuf.ByteString sdHash_; + /** + * required bytes sd_hash = 5; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required bytes sd_hash = 5; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + public static final int PARAMETERS_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+     * partition parameters
+     * </pre>
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + private void initFields() { + createTime_ = 0L; + lastAccessTime_ = 0L; + location_ = ""; + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSdHash()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, lastAccessTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getLocationBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, sdParameters_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, sdHash_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, parameters_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, lastAccessTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getLocationBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, sdParameters_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, sdHash_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, parameters_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Partition} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSdParametersFieldBuilder(); + getParametersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + lastAccessTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + location_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000010); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.lastAccessTime_ = lastAccessTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.location_ = location_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (sdParametersBuilder_ == null) { + result.sdParameters_ = sdParameters_; + } else { + result.sdParameters_ = sdParametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.sdHash_ = sdHash_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = 
parametersBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition.getDefaultInstance()) return this; + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasLastAccessTime()) { + setLastAccessTime(other.getLastAccessTime()); + } + if (other.hasLocation()) { + bitField0_ |= 0x00000004; + location_ = other.location_; + onChanged(); + } + if (other.hasSdParameters()) { + mergeSdParameters(other.getSdParameters()); + } + if (other.hasSdHash()) { + setSdHash(other.getSdHash()); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSdHash()) { + + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Partition) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 create_time = 1; + private long createTime_ ; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 1; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000001; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 1; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 last_access_time = 2; + private long lastAccessTime_ ; + /** + * optional int64 last_access_time = 2; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 last_access_time = 2; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + /** + * optional int64 last_access_time = 2; + */ + public Builder setLastAccessTime(long value) { + bitField0_ |= 0x00000002; + lastAccessTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_access_time = 2; + */ + public Builder clearLastAccessTime() { + bitField0_ = (bitField0_ & ~0x00000002); + lastAccessTime_ = 0L; + onChanged(); + return 
this; + } + + // optional string location = 3; + private java.lang.Object location_ = ""; + /** + * optional string location = 3; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string location = 3; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string location = 3; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string location = 3; + */ + public Builder setLocation( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + location_ = value; + onChanged(); + return this; + } + /** + * optional string location = 3; + */ + public Builder clearLocation() { + bitField0_ = (bitField0_ & ~0x00000004); + location_ = getDefaultInstance().getLocation(); + onChanged(); + return this; + } + /** + * optional string location = 3; + */ + public Builder setLocationBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + location_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + if (sdParametersBuilder_ == null) { + return sdParameters_; + } else { + return sdParametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sdParameters_ = value; + onChanged(); + } else { + sdParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (sdParametersBuilder_ == null) { + sdParameters_ = builderForValue.build(); + onChanged(); + } else { + sdParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + sdParameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); + } else { + sdParameters_ = value; + } + onChanged(); + } else { + sdParametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder clearSdParameters() { + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSdParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + if (sdParametersBuilder_ != null) { + return sdParametersBuilder_.getMessageOrBuilder(); + } else { + return sdParameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 4; + * + *
+       * storage descriptor parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getSdParametersFieldBuilder() { + if (sdParametersBuilder_ == null) { + sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + sdParameters_, + getParentForChildren(), + isClean()); + sdParameters_ = null; + } + return sdParametersBuilder_; + } + + // required bytes sd_hash = 5; + private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes sd_hash = 5; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required bytes sd_hash = 5; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + /** + * required bytes sd_hash = 5; + */ + public Builder setSdHash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + sdHash_ = value; + onChanged(); + return this; + } + /** + * required bytes sd_hash = 5; + */ + public Builder clearSdHash() { + bitField0_ = (bitField0_ & ~0x00000010); + sdHash_ = getDefaultInstance().getSdHash(); + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 6; + * + *
+       * partition parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Partition) + } + + static { + defaultInstance = new Partition(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Partition) + } + + public interface PrincipalPrivilegeSetEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string principal_name = 1; + /** + * required string principal_name = 1; + */ + boolean hasPrincipalName(); + /** + * required string principal_name = 1; + */ + java.lang.String getPrincipalName(); + /** + * required string principal_name = 1; + */ + com.google.protobuf.ByteString + getPrincipalNameBytes(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + java.util.List + getPrivilegesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + int getPrivilegesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + java.util.List + getPrivilegesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} + */ + public static final class PrincipalPrivilegeSetEntry extends + com.google.protobuf.GeneratedMessage + implements PrincipalPrivilegeSetEntryOrBuilder { + // Use PrincipalPrivilegeSetEntry.newBuilder() to construct. 
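A minimal usage sketch of the generated Partition API above, for orientation only: it builds a Partition through the generated builder, round-trips it through the protobuf wire format, and reads fields back. It relies only on methods visible in the generated class plus the standard protobuf 2.x runtime (toByteString, ByteString.copyFromUtf8); the location string and sd_hash bytes are illustrative placeholders, not values the metastore itself would produce.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

public class PartitionProtoSketch {
  public static void main(String[] args) throws Exception {
    // sd_hash is the only field Partition itself marks required, so build()
    // would throw an UninitializedMessageException without it.
    HbaseMetastoreProto.Partition part = HbaseMetastoreProto.Partition.newBuilder()
        .setCreateTime(System.currentTimeMillis() / 1000)
        .setLocation("hdfs://nn:8020/warehouse/t/ds=2015-01-01") // placeholder path
        .setSdHash(ByteString.copyFromUtf8("placeholder-hash"))  // placeholder bytes
        .build();

    // Round-trip through the wire format, standing in for the bytes the
    // HBase-backed metastore would presumably store and read back.
    ByteString serialized = part.toByteString();
    HbaseMetastoreProto.Partition copy =
        HbaseMetastoreProto.Partition.parseFrom(serialized);

    System.out.println(copy.getLocation() + " hasSdHash=" + copy.hasSdHash());
  }
}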
+ private PrincipalPrivilegeSetEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrincipalPrivilegeSetEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrincipalPrivilegeSetEntry defaultInstance; + public static PrincipalPrivilegeSetEntry getDefaultInstance() { + return defaultInstance; + } + + public PrincipalPrivilegeSetEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrincipalPrivilegeSetEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + principalName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + privileges_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = java.util.Collections.unmodifiableList(privileges_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrincipalPrivilegeSetEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrincipalPrivilegeSetEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private 
int bitField0_; + // required string principal_name = 1; + public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; + private java.lang.Object principalName_; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + principalName_ = s; + } + return s; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + public static final int PRIVILEGES_FIELD_NUMBER = 2; + private java.util.List privileges_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List getPrivilegesList() { + return privileges_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List + getPrivilegesOrBuilderList() { + return privileges_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public int getPrivilegesCount() { + return privileges_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { + return privileges_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index) { + return privileges_.get(index); + } + + private void initFields() { + principalName_ = ""; + privileges_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPrincipalName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrincipalNameBytes()); + } + for (int i = 0; i < privileges_.size(); i++) { + output.writeMessage(2, privileges_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrincipalNameBytes()); + } + for (int i = 0; i < privileges_.size(); i++) { + size += 
com.google.protobuf.CodedOutputStream + .computeMessageSize(2, privileges_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + principalName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (privilegesBuilder_ == null) { + privileges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + privilegesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.principalName_ = principalName_; + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = 
java.util.Collections.unmodifiableList(privileges_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()) return this; + if (other.hasPrincipalName()) { + bitField0_ |= 0x00000001; + principalName_ = other.principalName_; + onChanged(); + } + if (privilegesBuilder_ == null) { + if (!other.privileges_.isEmpty()) { + if (privileges_.isEmpty()) { + privileges_ = other.privileges_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensurePrivilegesIsMutable(); + privileges_.addAll(other.privileges_); + } + onChanged(); + } + } else { + if (!other.privileges_.isEmpty()) { + if (privilegesBuilder_.isEmpty()) { + privilegesBuilder_.dispose(); + privilegesBuilder_ = null; + privileges_ = other.privileges_; + bitField0_ = (bitField0_ & ~0x00000002); + privilegesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getPrivilegesFieldBuilder() : null; + } else { + privilegesBuilder_.addAllMessages(other.privileges_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPrincipalName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string principal_name = 1; + private java.lang.Object principalName_ = ""; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + principalName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return 
(com.google.protobuf.ByteString) ref; + } + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder clearPrincipalName() { + bitField0_ = (bitField0_ & ~0x00000001); + principalName_ = getDefaultInstance().getPrincipalName(); + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + private java.util.List privileges_ = + java.util.Collections.emptyList(); + private void ensurePrivilegesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + privileges_ = new java.util.ArrayList(privileges_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> privilegesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List getPrivilegesList() { + if (privilegesBuilder_ == null) { + return java.util.Collections.unmodifiableList(privileges_); + } else { + return privilegesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public int getPrivilegesCount() { + if (privilegesBuilder_ == null) { + return privileges_.size(); + } else { + return privilegesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getPrivileges(int index) { + if (privilegesBuilder_ == null) { + return privileges_.get(index); + } else { + return privilegesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder setPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.set(index, value); + onChanged(); + } else { + privilegesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder setPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.set(index, builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder 
addPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.add(value); + onChanged(); + } else { + privilegesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePrivilegesIsMutable(); + privileges_.add(index, value); + onChanged(); + } else { + privilegesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.add(builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addPrivileges( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder builderForValue) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.add(index, builderForValue.build()); + onChanged(); + } else { + privilegesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder addAllPrivileges( + java.lang.Iterable values) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + super.addAll(values, privileges_); + onChanged(); + } else { + privilegesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public Builder removePrivileges(int index) { + if (privilegesBuilder_ == null) { + ensurePrivilegesIsMutable(); + privileges_.remove(index); + onChanged(); + } else { + privilegesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder getPrivilegesBuilder( + int index) { + return getPrivilegesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder getPrivilegesOrBuilder( + int index) { + if (privilegesBuilder_ == null) { + return privileges_.get(index); } else { + return privilegesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo 
privileges = 2; + */ + public java.util.List + getPrivilegesOrBuilderList() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(privileges_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder() { + return getPrivilegesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder addPrivilegesBuilder( + int index) { + return getPrivilegesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo privileges = 2; + */ + public java.util.List + getPrivilegesBuilderList() { + return getPrivilegesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder>( + privileges_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) + } + + static { + defaultInstance = new PrincipalPrivilegeSetEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry) + } + + public interface PrincipalPrivilegeSetOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + java.util.List + getUsersList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + int getUsersCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + java.util.List + getUsersOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index); + + // repeated 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + java.util.List + getRolesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + int getRolesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + java.util.List + getRolesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} + */ + public static final class PrincipalPrivilegeSet extends + com.google.protobuf.GeneratedMessage + implements PrincipalPrivilegeSetOrBuilder { + // Use PrincipalPrivilegeSet.newBuilder() to construct. + private PrincipalPrivilegeSet(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrincipalPrivilegeSet(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrincipalPrivilegeSet defaultInstance; + public static PrincipalPrivilegeSet getDefaultInstance() { + return defaultInstance; + } + + public PrincipalPrivilegeSet getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrincipalPrivilegeSet( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + users_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + users_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + roles_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + users_ = 
java.util.Collections.unmodifiableList(users_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = java.util.Collections.unmodifiableList(roles_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrincipalPrivilegeSet parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrincipalPrivilegeSet(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + public static final int USERS_FIELD_NUMBER = 1; + private java.util.List users_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List getUsersList() { + return users_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersOrBuilderList() { + return users_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public int getUsersCount() { + return users_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { + return users_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index) { + return users_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + public static final int ROLES_FIELD_NUMBER = 2; + private java.util.List roles_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List getRolesList() { + return roles_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesOrBuilderList() { + return roles_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public int getRolesCount() { + return roles_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { + return roles_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index) { + return roles_.get(index); + } + + private void initFields() { + users_ = java.util.Collections.emptyList(); + roles_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getUsersCount(); i++) { + if (!getUsers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRolesCount(); i++) { + if (!getRoles(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < users_.size(); i++) { + output.writeMessage(1, users_.get(i)); + } + for (int i = 0; i < roles_.size(); i++) { + output.writeMessage(2, roles_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < users_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, users_.get(i)); + } + for (int i = 0; i < roles_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, roles_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUsersFieldBuilder(); + getRolesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (usersBuilder_ == null) { + users_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & 
~0x00000001); + } else { + usersBuilder_.clear(); + } + if (rolesBuilder_ == null) { + roles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + rolesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet(this); + int from_bitField0_ = bitField0_; + if (usersBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + users_ = java.util.Collections.unmodifiableList(users_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.users_ = users_; + } else { + result.users_ = usersBuilder_.build(); + } + if (rolesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = java.util.Collections.unmodifiableList(roles_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.roles_ = roles_; + } else { + result.roles_ = rolesBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) return this; + if (usersBuilder_ == null) { + if (!other.users_.isEmpty()) { + if (users_.isEmpty()) { + users_ = other.users_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureUsersIsMutable(); + users_.addAll(other.users_); + } + onChanged(); + } + } else { + if (!other.users_.isEmpty()) { + if (usersBuilder_.isEmpty()) { + usersBuilder_.dispose(); + usersBuilder_ = null; + users_ = other.users_; + bitField0_ = (bitField0_ & ~0x00000001); + usersBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getUsersFieldBuilder() : null; + } else { + usersBuilder_.addAllMessages(other.users_); + } + } + } + if (rolesBuilder_ == null) { + if (!other.roles_.isEmpty()) { + if (roles_.isEmpty()) { + roles_ = other.roles_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRolesIsMutable(); + roles_.addAll(other.roles_); + } + onChanged(); + } + } else { + if (!other.roles_.isEmpty()) { + if (rolesBuilder_.isEmpty()) { + rolesBuilder_.dispose(); + rolesBuilder_ = null; + roles_ = other.roles_; + bitField0_ = (bitField0_ & ~0x00000002); + rolesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRolesFieldBuilder() : null; + } else { + rolesBuilder_.addAllMessages(other.roles_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getUsersCount(); i++) { + if (!getUsers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRolesCount(); i++) { + if (!getRoles(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + private java.util.List users_ = + java.util.Collections.emptyList(); + private void ensureUsersIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + users_ = new java.util.ArrayList(users_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> usersBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List getUsersList() { + if (usersBuilder_ == null) { + return java.util.Collections.unmodifiableList(users_); + } else { + return usersBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public int getUsersCount() { + if (usersBuilder_ == null) { + return users_.size(); + } else { + return usersBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getUsers(int index) { + if (usersBuilder_ == null) { + return users_.get(index); + } else { + return usersBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder setUsers( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.set(index, value); + onChanged(); + } else { + usersBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder setUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.set(index, builderForValue.build()); + onChanged(); + } else { + usersBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.add(value); + onChanged(); + } else { + usersBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (usersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureUsersIsMutable(); + users_.add(index, value); + onChanged(); + } else { + usersBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.add(builderForValue.build()); + onChanged(); + } else { + usersBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addUsers( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.add(index, builderForValue.build()); + onChanged(); + } else { + usersBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder addAllUsers( + java.lang.Iterable values) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + super.addAll(values, users_); + onChanged(); + } else { + usersBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder clearUsers() { + if (usersBuilder_ == null) { + users_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + usersBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public Builder removeUsers(int index) { + if (usersBuilder_ == null) { + ensureUsersIsMutable(); + users_.remove(index); + onChanged(); + } else { 
+ usersBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getUsersBuilder( + int index) { + return getUsersFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getUsersOrBuilder( + int index) { + if (usersBuilder_ == null) { + return users_.get(index); } else { + return usersBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersOrBuilderList() { + if (usersBuilder_ != null) { + return usersBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(users_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder() { + return getUsersFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addUsersBuilder( + int index) { + return getUsersFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry users = 1; + */ + public java.util.List + getUsersBuilderList() { + return getUsersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> + getUsersFieldBuilder() { + if (usersBuilder_ == null) { + usersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( + users_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + users_ = null; + } + return usersBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + private java.util.List roles_ = + java.util.Collections.emptyList(); + private void ensureRolesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + roles_ = new java.util.ArrayList(roles_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> rolesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List getRolesList() { + if (rolesBuilder_ == null) { + return java.util.Collections.unmodifiableList(roles_); + } else { + return rolesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public int getRolesCount() { + if (rolesBuilder_ == null) { + return roles_.size(); + } else { + return rolesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry getRoles(int index) { + if (rolesBuilder_ == null) { + return roles_.get(index); + } else { + return rolesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder setRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.set(index, value); + onChanged(); + } else { + rolesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder setRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.set(index, builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.add(value); + onChanged(); + } else { + rolesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry value) { + if (rolesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRolesIsMutable(); + roles_.add(index, value); + onChanged(); + } else { + rolesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.add(builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addRoles( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder builderForValue) { + if 
(rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.add(index, builderForValue.build()); + onChanged(); + } else { + rolesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder addAllRoles( + java.lang.Iterable values) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + super.addAll(values, roles_); + onChanged(); + } else { + rolesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder clearRoles() { + if (rolesBuilder_ == null) { + roles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + rolesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public Builder removeRoles(int index) { + if (rolesBuilder_ == null) { + ensureRolesIsMutable(); + roles_.remove(index); + onChanged(); + } else { + rolesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder getRolesBuilder( + int index) { + return getRolesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder getRolesOrBuilder( + int index) { + if (rolesBuilder_ == null) { + return roles_.get(index); } else { + return rolesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesOrBuilderList() { + if (rolesBuilder_ != null) { + return rolesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(roles_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder() { + return getRolesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder addRolesBuilder( + int index) { + return getRolesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSetEntry roles = 2; + */ + public java.util.List + getRolesBuilderList() { + return getRolesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder> + getRolesFieldBuilder() { + if (rolesBuilder_ == null) { + rolesBuilder_ = 
new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntryOrBuilder>( + roles_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + roles_ = null; + } + return rolesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) + } + + static { + defaultInstance = new PrincipalPrivilegeSet(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet) + } + + public interface PrivilegeGrantInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string privilege = 1; + /** + * optional string privilege = 1; + */ + boolean hasPrivilege(); + /** + * optional string privilege = 1; + */ + java.lang.String getPrivilege(); + /** + * optional string privilege = 1; + */ + com.google.protobuf.ByteString + getPrivilegeBytes(); + + // optional int64 create_time = 2; + /** + * optional int64 create_time = 2; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 2; + */ + long getCreateTime(); + + // optional string grantor = 3; + /** + * optional string grantor = 3; + */ + boolean hasGrantor(); + /** + * optional string grantor = 3; + */ + java.lang.String getGrantor(); + /** + * optional string grantor = 3; + */ + com.google.protobuf.ByteString + getGrantorBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + boolean hasGrantorType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); + + // optional bool grant_option = 5; + /** + * optional bool grant_option = 5; + */ + boolean hasGrantOption(); + /** + * optional bool grant_option = 5; + */ + boolean getGrantOption(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} + */ + public static final class PrivilegeGrantInfo extends + com.google.protobuf.GeneratedMessage + implements PrivilegeGrantInfoOrBuilder { + // Use PrivilegeGrantInfo.newBuilder() to construct. 
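Reviewer orientation note (not part of the patch): the PrincipalPrivilegeSet message closed above and the PrivilegeGrantInfo message that begins here expose the usual protoc-generated builder API. Below is a minimal sketch of how calling code might assemble and round-trip these messages, using only accessors visible in this generated class; the class name PrivilegeSetRoundTrip, the literal values ("SELECT", "admin"), and the byte[] round-trip framing are illustrative assumptions, and PrincipalPrivilegeSetEntry is only populated with its default instance because its fields are defined elsewhere in the proto.

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetEntry;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo;

    public class PrivilegeSetRoundTrip {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // A single grant: all five PrivilegeGrantInfo fields are optional, so any subset may be set.
        PrivilegeGrantInfo grant = PrivilegeGrantInfo.newBuilder()
            .setPrivilege("SELECT")
            .setCreateTime(System.currentTimeMillis() / 1000)
            .setGrantor("admin")
            .setGrantorType(PrincipalType.USER)
            .setGrantOption(false)
            .build();

        // PrincipalPrivilegeSet holds repeated user and role entries. A default
        // entry stands in for a fully populated one here.
        PrincipalPrivilegeSet privs = PrincipalPrivilegeSet.newBuilder()
            .addUsers(PrincipalPrivilegeSetEntry.getDefaultInstance())
            .build();

        // Serialize and parse back, as a store keeping the set in a byte[]-valued
        // cell might do.
        byte[] serialized = privs.toByteArray();
        PrincipalPrivilegeSet copy = PrincipalPrivilegeSet.parseFrom(serialized);
        System.out.println(copy.getUsersCount() + " user entries, "
            + copy.getRolesCount() + " role entries; sample grant: " + grant.getPrivilege());
      }
    }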
+ private PrivilegeGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PrivilegeGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PrivilegeGrantInfo defaultInstance; + public static PrivilegeGrantInfo getDefaultInstance() { + return defaultInstance; + } + + public PrivilegeGrantInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PrivilegeGrantInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + privilege_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + createTime_ = input.readInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + grantor_ = input.readBytes(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + grantorType_ = value; + } + break; + } + case 40: { + bitField0_ |= 0x00000010; + grantOption_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PrivilegeGrantInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PrivilegeGrantInfo(input, extensionRegistry); + } + }; + + @java.lang.Override 
+ public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string privilege = 1; + public static final int PRIVILEGE_FIELD_NUMBER = 1; + private java.lang.Object privilege_; + /** + * optional string privilege = 1; + */ + public boolean hasPrivilege() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string privilege = 1; + */ + public java.lang.String getPrivilege() { + java.lang.Object ref = privilege_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + privilege_ = s; + } + return s; + } + } + /** + * optional string privilege = 1; + */ + public com.google.protobuf.ByteString + getPrivilegeBytes() { + java.lang.Object ref = privilege_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + privilege_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 create_time = 2; + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private long createTime_; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + + // optional string grantor = 3; + public static final int GRANTOR_FIELD_NUMBER = 3; + private java.lang.Object grantor_; + /** + * optional string grantor = 3; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string grantor = 3; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + grantor_ = s; + } + return s; + } + } + /** + * optional string grantor = 3; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + public static final int GRANTOR_TYPE_FIELD_NUMBER = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + + // optional bool grant_option = 5; + public static final int GRANT_OPTION_FIELD_NUMBER = 5; + private boolean grantOption_; + /** + * optional bool grant_option = 5; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool grant_option = 5; + */ + public boolean 
getGrantOption() { + return grantOption_; + } + + private void initFields() { + privilege_ = ""; + createTime_ = 0L; + grantor_ = ""; + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + grantOption_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrivilegeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getGrantorBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, grantOption_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrivilegeBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getGrantorBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, grantOption_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom(java.io.InputStream input) + 
throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + privilege_ = ""; + bitField0_ 
= (bitField0_ & ~0x00000001); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + grantor_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000008); + grantOption_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.privilege_ = privilege_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.grantor_ = grantor_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.grantorType_ = grantorType_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.grantOption_ = grantOption_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo.getDefaultInstance()) return this; + if (other.hasPrivilege()) { + bitField0_ |= 0x00000001; + privilege_ = other.privilege_; + onChanged(); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasGrantor()) { + bitField0_ |= 0x00000004; + grantor_ = other.grantor_; + onChanged(); + } + if (other.hasGrantorType()) { + setGrantorType(other.getGrantorType()); + } + if (other.hasGrantOption()) { + setGrantOption(other.getGrantOption()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrivilegeGrantInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string privilege = 1; + private java.lang.Object privilege_ = ""; + /** + * optional string privilege = 1; + */ + public boolean hasPrivilege() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string privilege = 1; + */ + public java.lang.String getPrivilege() { + java.lang.Object ref = privilege_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + privilege_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string privilege = 1; + */ + public com.google.protobuf.ByteString + getPrivilegeBytes() { + java.lang.Object ref = privilege_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + privilege_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string privilege = 1; + */ + public Builder setPrivilege( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + privilege_ = value; + onChanged(); + return this; + } + /** + * optional string privilege = 1; + */ + public Builder clearPrivilege() { + bitField0_ = (bitField0_ & ~0x00000001); + privilege_ = getDefaultInstance().getPrivilege(); + onChanged(); + return this; + } + /** + * optional string privilege = 1; + */ + public Builder setPrivilegeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + privilege_ = value; + onChanged(); + return this; + } + + // optional int64 create_time = 2; + private long createTime_ ; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 2; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000002; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 2; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional string grantor = 3; + private java.lang.Object grantor_ = ""; + /** + * optional string grantor = 3; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string grantor = 3; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + grantor_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string grantor = 3; + */ + public 
com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string grantor = 3; + */ + public Builder setGrantor( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + grantor_ = value; + onChanged(); + return this; + } + /** + * optional string grantor = 3; + */ + public Builder clearGrantor() { + bitField0_ = (bitField0_ & ~0x00000004); + grantor_ = getDefaultInstance().getGrantor(); + onChanged(); + return this; + } + /** + * optional string grantor = 3; + */ + public Builder setGrantorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + grantor_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantorType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 4; + */ + public Builder clearGrantorType() { + bitField0_ = (bitField0_ & ~0x00000008); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional bool grant_option = 5; + private boolean grantOption_ ; + /** + * optional bool grant_option = 5; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool grant_option = 5; + */ + public boolean getGrantOption() { + return grantOption_; + } + /** + * optional bool grant_option = 5; + */ + public Builder setGrantOption(boolean value) { + bitField0_ |= 0x00000010; + grantOption_ = value; + onChanged(); + return this; + } + /** + * optional bool grant_option = 5; + */ + public Builder clearGrantOption() { + bitField0_ = (bitField0_ & ~0x00000010); + grantOption_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) + } + + static { + defaultInstance = new PrivilegeGrantInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PrivilegeGrantInfo) + } + + public interface RoleGrantInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string principal_name = 1; + /** + * required string 
principal_name = 1; + */ + boolean hasPrincipalName(); + /** + * required string principal_name = 1; + */ + java.lang.String getPrincipalName(); + /** + * required string principal_name = 1; + */ + com.google.protobuf.ByteString + getPrincipalNameBytes(); + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + boolean hasPrincipalType(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType(); + + // optional int64 add_time = 3; + /** + * optional int64 add_time = 3; + */ + boolean hasAddTime(); + /** + * optional int64 add_time = 3; + */ + long getAddTime(); + + // optional string grantor = 4; + /** + * optional string grantor = 4; + */ + boolean hasGrantor(); + /** + * optional string grantor = 4; + */ + java.lang.String getGrantor(); + /** + * optional string grantor = 4; + */ + com.google.protobuf.ByteString + getGrantorBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + boolean hasGrantorType(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType(); + + // optional bool grant_option = 6; + /** + * optional bool grant_option = 6; + */ + boolean hasGrantOption(); + /** + * optional bool grant_option = 6; + */ + boolean getGrantOption(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} + */ + public static final class RoleGrantInfo extends + com.google.protobuf.GeneratedMessage + implements RoleGrantInfoOrBuilder { + // Use RoleGrantInfo.newBuilder() to construct. 
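Reviewer orientation note (not part of the patch): RoleGrantInfo is the one message in this hunk with required fields (principal_name and principal_type), so build() fails with an uninitialized-message error if either is left unset. A minimal construction sketch follows, assuming the standard builder setters protoc generates for the fields listed in RoleGrantInfoOrBuilder above (their definitions fall later in this generated file); the class name RoleGrantInfoExample and the literal values are illustrative.

    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType;
    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo;

    public class RoleGrantInfoExample {
      public static void main(String[] args) {
        // principal_name and principal_type are required; the other four fields are optional.
        RoleGrantInfo grant = RoleGrantInfo.newBuilder()
            .setPrincipalName("etl_user")          // illustrative value
            .setPrincipalType(PrincipalType.USER)  // USER is the only enum value referenced in this hunk
            .setAddTime(1234567890L)
            .setGrantor("admin")
            .setGrantorType(PrincipalType.USER)
            .setGrantOption(true)
            .build();                              // throws if a required field is missing

        System.out.println(grant.hasPrincipalName() + " " + grant.getPrincipalName());
      }
    }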
+ private RoleGrantInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleGrantInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleGrantInfo defaultInstance; + public static RoleGrantInfo getDefaultInstance() { + return defaultInstance; + } + + public RoleGrantInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleGrantInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + principalName_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + principalType_ = value; + } + break; + } + case 24: { + bitField0_ |= 0x00000004; + addTime_ = input.readInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + grantor_ = input.readBytes(); + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + grantorType_ = value; + } + break; + } + case 48: { + bitField0_ |= 0x00000020; + grantOption_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + public RoleGrantInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleGrantInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string principal_name = 1; + public static final int PRINCIPAL_NAME_FIELD_NUMBER = 1; + private java.lang.Object principalName_; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + principalName_ = s; + } + return s; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + public static final int PRINCIPAL_TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public boolean hasPrincipalType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { + return principalType_; + } + + // optional int64 add_time = 3; + public static final int ADD_TIME_FIELD_NUMBER = 3; + private long addTime_; + /** + * optional int64 add_time = 3; + */ + public boolean hasAddTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 add_time = 3; + */ + public long getAddTime() { + return addTime_; + } + + // optional string grantor = 4; + public static final int GRANTOR_FIELD_NUMBER = 4; + private java.lang.Object grantor_; + /** + * optional string grantor = 4; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string grantor = 4; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + grantor_ = s; + } + return s; + } + } + /** + * optional string grantor = 4; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + 
return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + public static final int GRANTOR_TYPE_FIELD_NUMBER = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + + // optional bool grant_option = 6; + public static final int GRANT_OPTION_FIELD_NUMBER = 6; + private boolean grantOption_; + /** + * optional bool grant_option = 6; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool grant_option = 6; + */ + public boolean getGrantOption() { + return grantOption_; + } + + private void initFields() { + principalName_ = ""; + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + addTime_ = 0L; + grantor_ = ""; + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + grantOption_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPrincipalName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPrincipalType()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPrincipalNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, principalType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, addTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getGrantorBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, grantOption_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPrincipalNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, principalType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, addTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getGrantorBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, grantorType_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBoolSize(6, grantOption_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo} + */ + public static 
final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + principalName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000002); + addTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + grantor_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + bitField0_ = (bitField0_ & ~0x00000010); + grantOption_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.principalName_ = principalName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.principalType_ = principalType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.addTime_ = addTime_; + if 
(((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.grantor_ = grantor_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.grantorType_ = grantorType_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.grantOption_ = grantOption_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()) return this; + if (other.hasPrincipalName()) { + bitField0_ |= 0x00000001; + principalName_ = other.principalName_; + onChanged(); + } + if (other.hasPrincipalType()) { + setPrincipalType(other.getPrincipalType()); + } + if (other.hasAddTime()) { + setAddTime(other.getAddTime()); + } + if (other.hasGrantor()) { + bitField0_ |= 0x00000008; + grantor_ = other.grantor_; + onChanged(); + } + if (other.hasGrantorType()) { + setGrantorType(other.getGrantorType()); + } + if (other.hasGrantOption()) { + setGrantOption(other.getGrantOption()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPrincipalName()) { + + return false; + } + if (!hasPrincipalType()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string principal_name = 1; + private java.lang.Object principalName_ = ""; + /** + * required string principal_name = 1; + */ + public boolean hasPrincipalName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string principal_name = 1; + */ + public java.lang.String getPrincipalName() { + java.lang.Object ref = principalName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + principalName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string principal_name = 1; + */ + public com.google.protobuf.ByteString + getPrincipalNameBytes() { + java.lang.Object ref = principalName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + principalName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalName( + java.lang.String value) { + if (value 
== null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder clearPrincipalName() { + bitField0_ = (bitField0_ & ~0x00000001); + principalName_ = getDefaultInstance().getPrincipalName(); + onChanged(); + return this; + } + /** + * required string principal_name = 1; + */ + public Builder setPrincipalNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + principalName_ = value; + onChanged(); + return this; + } + + // required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public boolean hasPrincipalType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getPrincipalType() { + return principalType_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public Builder setPrincipalType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + principalType_ = value; + onChanged(); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PrincipalType principal_type = 2; + */ + public Builder clearPrincipalType() { + bitField0_ = (bitField0_ & ~0x00000002); + principalType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional int64 add_time = 3; + private long addTime_ ; + /** + * optional int64 add_time = 3; + */ + public boolean hasAddTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 add_time = 3; + */ + public long getAddTime() { + return addTime_; + } + /** + * optional int64 add_time = 3; + */ + public Builder setAddTime(long value) { + bitField0_ |= 0x00000004; + addTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 add_time = 3; + */ + public Builder clearAddTime() { + bitField0_ = (bitField0_ & ~0x00000004); + addTime_ = 0L; + onChanged(); + return this; + } + + // optional string grantor = 4; + private java.lang.Object grantor_ = ""; + /** + * optional string grantor = 4; + */ + public boolean hasGrantor() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string grantor = 4; + */ + public java.lang.String getGrantor() { + java.lang.Object ref = grantor_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + grantor_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string grantor = 4; + */ + public com.google.protobuf.ByteString + getGrantorBytes() { + java.lang.Object ref = grantor_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + grantor_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; 
+ } + } + /** + * optional string grantor = 4; + */ + public Builder setGrantor( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantor_ = value; + onChanged(); + return this; + } + /** + * optional string grantor = 4; + */ + public Builder clearGrantor() { + bitField0_ = (bitField0_ & ~0x00000008); + grantor_ = getDefaultInstance().getGrantor(); + onChanged(); + return this; + } + /** + * optional string grantor = 4; + */ + public Builder setGrantorBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + grantor_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public boolean hasGrantorType() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType getGrantorType() { + return grantorType_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public Builder setGrantorType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + grantorType_ = value; + onChanged(); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalType grantor_type = 5; + */ + public Builder clearGrantorType() { + bitField0_ = (bitField0_ & ~0x00000010); + grantorType_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalType.USER; + onChanged(); + return this; + } + + // optional bool grant_option = 6; + private boolean grantOption_ ; + /** + * optional bool grant_option = 6; + */ + public boolean hasGrantOption() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool grant_option = 6; + */ + public boolean getGrantOption() { + return grantOption_; + } + /** + * optional bool grant_option = 6; + */ + public Builder setGrantOption(boolean value) { + bitField0_ |= 0x00000020; + grantOption_ = value; + onChanged(); + return this; + } + /** + * optional bool grant_option = 6; + */ + public Builder clearGrantOption() { + bitField0_ = (bitField0_ & ~0x00000020); + grantOption_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) + } + + static { + defaultInstance = new RoleGrantInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo) + } + + public interface RoleGrantInfoListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + java.util.List + getGrantInfoList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo 
getGrantInfo(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + int getGrantInfoCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + java.util.List + getGrantInfoOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} + */ + public static final class RoleGrantInfoList extends + com.google.protobuf.GeneratedMessage + implements RoleGrantInfoListOrBuilder { + // Use RoleGrantInfoList.newBuilder() to construct. + private RoleGrantInfoList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleGrantInfoList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleGrantInfoList defaultInstance; + public static RoleGrantInfoList getDefaultInstance() { + return defaultInstance; + } + + public RoleGrantInfoList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleGrantInfoList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + grantInfo_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RoleGrantInfoList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleGrantInfoList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + public static final int GRANT_INFO_FIELD_NUMBER = 1; + private java.util.List grantInfo_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List getGrantInfoList() { + return grantInfo_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoOrBuilderList() { + return grantInfo_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public int getGrantInfoCount() { + return grantInfo_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { + return grantInfo_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index) { + return grantInfo_.get(index); + } + + private void initFields() { + grantInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getGrantInfoCount(); i++) { + if (!getGrantInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < grantInfo_.size(); i++) { + output.writeMessage(1, grantInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < grantInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, grantInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGrantInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (grantInfoBuilder_ == null) { + grantInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + grantInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList(this); + int from_bitField0_ = bitField0_; + if (grantInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = java.util.Collections.unmodifiableList(grantInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.grantInfo_ = grantInfo_; + } else { + result.grantInfo_ = grantInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList.getDefaultInstance()) return this; + if (grantInfoBuilder_ == null) { + if (!other.grantInfo_.isEmpty()) { + if (grantInfo_.isEmpty()) { + grantInfo_ = other.grantInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureGrantInfoIsMutable(); + grantInfo_.addAll(other.grantInfo_); + } + onChanged(); + } + } else { + if (!other.grantInfo_.isEmpty()) { + if (grantInfoBuilder_.isEmpty()) { + grantInfoBuilder_.dispose(); + grantInfoBuilder_ = null; + grantInfo_ = other.grantInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + 
grantInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getGrantInfoFieldBuilder() : null; + } else { + grantInfoBuilder_.addAllMessages(other.grantInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getGrantInfoCount(); i++) { + if (!getGrantInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + private java.util.List grantInfo_ = + java.util.Collections.emptyList(); + private void ensureGrantInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + grantInfo_ = new java.util.ArrayList(grantInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> grantInfoBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List getGrantInfoList() { + if (grantInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(grantInfo_); + } else { + return grantInfoBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public int getGrantInfoCount() { + if (grantInfoBuilder_ == null) { + return grantInfo_.size(); + } else { + return grantInfoBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo getGrantInfo(int index) { + if (grantInfoBuilder_ == null) { + return grantInfo_.get(index); + } else { + return grantInfoBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder setGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.set(index, value); + onChanged(); + } else { + grantInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder setGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + 
grantInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.add(value); + onChanged(); + } else { + grantInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo value) { + if (grantInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGrantInfoIsMutable(); + grantInfo_.add(index, value); + onChanged(); + } else { + grantInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.add(builderForValue.build()); + onChanged(); + } else { + grantInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addGrantInfo( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder builderForValue) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + grantInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder addAllGrantInfo( + java.lang.Iterable values) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + super.addAll(values, grantInfo_); + onChanged(); + } else { + grantInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder clearGrantInfo() { + if (grantInfoBuilder_ == null) { + grantInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + grantInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public Builder removeGrantInfo(int index) { + if (grantInfoBuilder_ == null) { + ensureGrantInfoIsMutable(); + grantInfo_.remove(index); + onChanged(); + } else { + grantInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder getGrantInfoBuilder( + int index) { + return getGrantInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder getGrantInfoOrBuilder( + int index) { + if (grantInfoBuilder_ == null) { + return grantInfo_.get(index); } else { + return grantInfoBuilder_.getMessageOrBuilder(index); + 
} + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoOrBuilderList() { + if (grantInfoBuilder_ != null) { + return grantInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(grantInfo_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder() { + return getGrantInfoFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder addGrantInfoBuilder( + int index) { + return getGrantInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.RoleGrantInfo grant_info = 1; + */ + public java.util.List + getGrantInfoBuilderList() { + return getGrantInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder> + getGrantInfoFieldBuilder() { + if (grantInfoBuilder_ == null) { + grantInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleGrantInfoOrBuilder>( + grantInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + grantInfo_ = null; + } + return grantInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) + } + + static { + defaultInstance = new RoleGrantInfoList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleGrantInfoList) + } + + public interface RoleListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string role = 1; + /** + * repeated string role = 1; + */ + java.util.List + getRoleList(); + /** + * repeated string role = 1; + */ + int getRoleCount(); + /** + * repeated string role = 1; + */ + java.lang.String getRole(int index); + /** + * repeated string role = 1; + */ + com.google.protobuf.ByteString + getRoleBytes(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} + */ + public static final class RoleList extends + com.google.protobuf.GeneratedMessage + implements RoleListOrBuilder { + // Use RoleList.newBuilder() to construct. 
+ private RoleList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RoleList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RoleList defaultInstance; + public static RoleList getDefaultInstance() { + return defaultInstance; + } + + public RoleList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RoleList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + role_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.UnmodifiableLazyStringList(role_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RoleList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RoleList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string role = 1; + public static final int ROLE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList role_; + /** + * repeated string role = 1; + */ + public java.util.List + getRoleList() { + return role_; + } + /** + * repeated string role = 1; + */ + public int getRoleCount() { + return role_.size(); + } + /** + * repeated string role = 1; + */ + public java.lang.String getRole(int 
index) { + return role_.get(index); + } + /** + * repeated string role = 1; + */ + public com.google.protobuf.ByteString + getRoleBytes(int index) { + return role_.getByteString(index); + } + + private void initFields() { + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < role_.size(); i++) { + output.writeBytes(1, role_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < role_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(role_.getByteString(i)); + } + size += dataSize; + size += 1 * getRoleList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.RoleList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList 
buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.UnmodifiableLazyStringList( + role_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.role_ = role_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList.getDefaultInstance()) return this; + if (!other.role_.isEmpty()) { + if (role_.isEmpty()) { + role_ = other.role_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRoleIsMutable(); + role_.addAll(other.role_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string role = 1; + private com.google.protobuf.LazyStringList role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureRoleIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + role_ = new com.google.protobuf.LazyStringArrayList(role_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string role = 1; + */ + public java.util.List + getRoleList() { + return java.util.Collections.unmodifiableList(role_); + } + /** + * repeated string role = 1; + */ + public int getRoleCount() { + return role_.size(); + } + /** + * repeated string role = 1; + */ + public java.lang.String getRole(int index) { + return role_.get(index); + } + /** + * repeated string role = 1; + */ + public com.google.protobuf.ByteString + getRoleBytes(int index) { + return role_.getByteString(index); + } + /** + * repeated string role = 1; + */ + public Builder setRole( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addRole( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.add(value); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addAllRole( + java.lang.Iterable values) { + ensureRoleIsMutable(); + super.addAll(values, role_); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder 
clearRole() { + role_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string role = 1; + */ + public Builder addRoleBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureRoleIsMutable(); + role_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) + } + + static { + defaultInstance = new RoleList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.RoleList) + } + + public interface RoleOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int64 create_time = 1; + /** + * optional int64 create_time = 1; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 1; + */ + long getCreateTime(); + + // optional string owner_name = 2; + /** + * optional string owner_name = 2; + */ + boolean hasOwnerName(); + /** + * optional string owner_name = 2; + */ + java.lang.String getOwnerName(); + /** + * optional string owner_name = 2; + */ + com.google.protobuf.ByteString + getOwnerNameBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} + */ + public static final class Role extends + com.google.protobuf.GeneratedMessage + implements RoleOrBuilder { + // Use Role.newBuilder() to construct. + private Role(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Role(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Role defaultInstance; + public static Role getDefaultInstance() { + return defaultInstance; + } + + public Role getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Role( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + createTime_ = input.readInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + ownerName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Role parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Role(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional int64 create_time = 1; + public static final int CREATE_TIME_FIELD_NUMBER = 1; + private long createTime_; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + + // optional string owner_name = 2; + public static final int OWNER_NAME_FIELD_NUMBER = 2; + private java.lang.Object ownerName_; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + ownerName_ = s; + } + return s; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + createTime_ = 0L; + ownerName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getOwnerNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, createTime_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getOwnerNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + 
return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Role} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.RoleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + ownerName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ownerName_ = ownerName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role.getDefaultInstance()) return this; + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasOwnerName()) { + bitField0_ |= 0x00000002; + ownerName_ = other.ownerName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } 
+ + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Role) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int64 create_time = 1; + private long createTime_ ; + /** + * optional int64 create_time = 1; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int64 create_time = 1; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 1; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000001; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 1; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional string owner_name = 2; + private java.lang.Object ownerName_ = ""; + /** + * optional string owner_name = 2; + */ + public boolean hasOwnerName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string owner_name = 2; + */ + public java.lang.String getOwnerName() { + java.lang.Object ref = ownerName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + ownerName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner_name = 2; + */ + public com.google.protobuf.ByteString + getOwnerNameBytes() { + java.lang.Object ref = ownerName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + ownerName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder clearOwnerName() { + bitField0_ = (bitField0_ & ~0x00000002); + ownerName_ = getDefaultInstance().getOwnerName(); + onChanged(); + return this; + } + /** + * optional string owner_name = 2; + */ + public Builder setOwnerNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + ownerName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Role) + } + + static { + defaultInstance = new Role(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Role) + } + + public interface StorageDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + java.util.List + 
getColsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + int getColsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + java.util.List + getColsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index); + + // optional string input_format = 2; + /** + * optional string input_format = 2; + */ + boolean hasInputFormat(); + /** + * optional string input_format = 2; + */ + java.lang.String getInputFormat(); + /** + * optional string input_format = 2; + */ + com.google.protobuf.ByteString + getInputFormatBytes(); + + // optional string output_format = 3; + /** + * optional string output_format = 3; + */ + boolean hasOutputFormat(); + /** + * optional string output_format = 3; + */ + java.lang.String getOutputFormat(); + /** + * optional string output_format = 3; + */ + com.google.protobuf.ByteString + getOutputFormatBytes(); + + // optional bool is_compressed = 4; + /** + * optional bool is_compressed = 4; + */ + boolean hasIsCompressed(); + /** + * optional bool is_compressed = 4; + */ + boolean getIsCompressed(); + + // optional sint32 num_buckets = 5; + /** + * optional sint32 num_buckets = 5; + */ + boolean hasNumBuckets(); + /** + * optional sint32 num_buckets = 5; + */ + int getNumBuckets(); + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + boolean hasSerdeInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder(); + + // repeated string bucket_cols = 7; + /** + * repeated string bucket_cols = 7; + */ + java.util.List + getBucketColsList(); + /** + * repeated string bucket_cols = 7; + */ + int getBucketColsCount(); + /** + * repeated string bucket_cols = 7; + */ + java.lang.String getBucketCols(int index); + /** + * repeated string bucket_cols = 7; + */ + com.google.protobuf.ByteString + getBucketColsBytes(int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + java.util.List + getSortColsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + int getSortColsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + java.util.List + getSortColsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; 
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index); + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + boolean hasSkewedInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder(); + + // optional bool stored_as_sub_directories = 10; + /** + * optional bool stored_as_sub_directories = 10; + */ + boolean hasStoredAsSubDirectories(); + /** + * optional bool stored_as_sub_directories = 10; + */ + boolean getStoredAsSubDirectories(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} + */ + public static final class StorageDescriptor extends + com.google.protobuf.GeneratedMessage + implements StorageDescriptorOrBuilder { + // Use StorageDescriptor.newBuilder() to construct. + private StorageDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StorageDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StorageDescriptor defaultInstance; + public static StorageDescriptor getDefaultInstance() { + return defaultInstance; + } + + public StorageDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StorageDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + cols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); + break; + } + case 18: { + bitField0_ |= 0x00000001; + inputFormat_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000002; + outputFormat_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000004; + isCompressed_ = input.readBool(); + break; + } + case 40: { + bitField0_ |= 0x00000008; + numBuckets_ = input.readSInt32(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = 
serdeInfo_.toBuilder(); + } + serdeInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serdeInfo_); + serdeInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000040; + } + bucketCols_.add(input.readBytes()); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + sortCols_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.PARSER, extensionRegistry)); + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = skewedInfo_.toBuilder(); + } + skewedInfo_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(skewedInfo_); + skewedInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 80: { + bitField0_ |= 0x00000040; + storedAsSubDirectories_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = java.util.Collections.unmodifiableList(cols_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList(bucketCols_); + } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = java.util.Collections.unmodifiableList(sortCols_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StorageDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StorageDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface OrderOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required 
string column_name = 1; + /** + * required string column_name = 1; + */ + boolean hasColumnName(); + /** + * required string column_name = 1; + */ + java.lang.String getColumnName(); + /** + * required string column_name = 1; + */ + com.google.protobuf.ByteString + getColumnNameBytes(); + + // optional sint32 order = 2 [default = 1]; + /** + * optional sint32 order = 2 [default = 1]; + */ + boolean hasOrder(); + /** + * optional sint32 order = 2 [default = 1]; + */ + int getOrder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} + */ + public static final class Order extends + com.google.protobuf.GeneratedMessage + implements OrderOrBuilder { + // Use Order.newBuilder() to construct. + private Order(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Order(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Order defaultInstance; + public static Order getDefaultInstance() { + return defaultInstance; + } + + public Order getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Order( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + columnName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + order_ = input.readSInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Order parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new Order(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string column_name = 1; + public static final int COLUMN_NAME_FIELD_NUMBER = 1; + private java.lang.Object columnName_; + /** + * required string column_name = 1; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string column_name = 1; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + columnName_ = s; + } + return s; + } + } + /** + * required string column_name = 1; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional sint32 order = 2 [default = 1]; + public static final int ORDER_FIELD_NUMBER = 2; + private int order_; + /** + * optional sint32 order = 2 [default = 1]; + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public int getOrder() { + return order_; + } + + private void initFields() { + columnName_ = ""; + order_ = 1; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasColumnName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getColumnNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt32(2, order_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getColumnNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt32Size(2, order_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + columnName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + order_ = 1; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.columnName_ = columnName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.order_ = order_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()) return this; + if (other.hasColumnName()) { + bitField0_ |= 0x00000001; + columnName_ = other.columnName_; + onChanged(); + } + if (other.hasOrder()) { + setOrder(other.getOrder()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if 
(!hasColumnName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string column_name = 1; + private java.lang.Object columnName_ = ""; + /** + * required string column_name = 1; + */ + public boolean hasColumnName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string column_name = 1; + */ + public java.lang.String getColumnName() { + java.lang.Object ref = columnName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + columnName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string column_name = 1; + */ + public com.google.protobuf.ByteString + getColumnNameBytes() { + java.lang.Object ref = columnName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + columnName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string column_name = 1; + */ + public Builder setColumnName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + columnName_ = value; + onChanged(); + return this; + } + /** + * required string column_name = 1; + */ + public Builder clearColumnName() { + bitField0_ = (bitField0_ & ~0x00000001); + columnName_ = getDefaultInstance().getColumnName(); + onChanged(); + return this; + } + /** + * required string column_name = 1; + */ + public Builder setColumnNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + columnName_ = value; + onChanged(); + return this; + } + + // optional sint32 order = 2 [default = 1]; + private int order_ = 1; + /** + * optional sint32 order = 2 [default = 1]; + */ + public boolean hasOrder() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public int getOrder() { + return order_; + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public Builder setOrder(int value) { + bitField0_ |= 0x00000002; + order_ = value; + onChanged(); + return this; + } + /** + * optional sint32 order = 2 [default = 1]; + */ + public Builder clearOrder() { + bitField0_ = (bitField0_ & ~0x00000002); + order_ = 1; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) + } + + static { + defaultInstance = new Order(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order) + } + + public interface SerDeInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string name = 1; 
+ /** + * optional string name = 1; + */ + boolean hasName(); + /** + * optional string name = 1; + */ + java.lang.String getName(); + /** + * optional string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // optional string serialization_lib = 2; + /** + * optional string serialization_lib = 2; + */ + boolean hasSerializationLib(); + /** + * optional string serialization_lib = 2; + */ + java.lang.String getSerializationLib(); + /** + * optional string serialization_lib = 2; + */ + com.google.protobuf.ByteString + getSerializationLibBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} + */ + public static final class SerDeInfo extends + com.google.protobuf.GeneratedMessage + implements SerDeInfoOrBuilder { + // Use SerDeInfo.newBuilder() to construct. + private SerDeInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SerDeInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SerDeInfo defaultInstance; + public static SerDeInfo getDefaultInstance() { + return defaultInstance; + } + + public SerDeInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SerDeInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + serializationLib_ = input.readBytes(); + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SerDeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SerDeInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string serialization_lib = 2; + public static final int SERIALIZATION_LIB_FIELD_NUMBER = 2; + private java.lang.Object serializationLib_; + /** + * optional string serialization_lib = 2; + */ + public boolean hasSerializationLib() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string serialization_lib = 2; + */ + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + serializationLib_ = s; + } + return s; + } + } + /** + * optional string serialization_lib = 2; + */ + public com.google.protobuf.ByteString + getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serializationLib_ = b; + return b; + } else { 
+ return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + public static final int PARAMETERS_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + private void initFields() { + name_ = ""; + serializationLib_ = ""; + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSerializationLibBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, parameters_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSerializationLibBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, parameters_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.class, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getParametersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + serializationLib_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.serializationLib_ = serializationLib_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) return this; + if (other.hasName()) { 
+ bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasSerializationLib()) { + bitField0_ |= 0x00000002; + serializationLib_ = other.serializationLib_; + onChanged(); + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string name = 1; + private java.lang.Object name_ = ""; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // optional string serialization_lib = 2; + private java.lang.Object serializationLib_ = ""; + /** + * optional string serialization_lib = 2; + */ + public boolean hasSerializationLib() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string serialization_lib = 2; + */ + public java.lang.String getSerializationLib() { + java.lang.Object ref = serializationLib_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + serializationLib_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string serialization_lib = 2; + */ + public com.google.protobuf.ByteString + getSerializationLibBytes() { + java.lang.Object ref = serializationLib_; + if (ref 
instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serializationLib_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string serialization_lib = 2; + */ + public Builder setSerializationLib( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + serializationLib_ = value; + onChanged(); + return this; + } + /** + * optional string serialization_lib = 2; + */ + public Builder clearSerializationLib() { + bitField0_ = (bitField0_ & ~0x00000002); + serializationLib_ = getDefaultInstance().getSerializationLib(); + onChanged(); + return this; + } + /** + * optional string serialization_lib = 2; + */ + public Builder setSerializationLibBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + serializationLib_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = 
value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) + } + + static { + defaultInstance = new SerDeInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo) + } + + public interface SkewedInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string skewed_col_names = 1; + /** + * repeated string skewed_col_names = 1; + */ + java.util.List + getSkewedColNamesList(); + /** + * repeated string skewed_col_names = 1; + */ + int getSkewedColNamesCount(); + /** + * repeated string skewed_col_names = 1; + */ + java.lang.String getSkewedColNames(int index); + /** + * repeated string skewed_col_names = 1; + */ + com.google.protobuf.ByteString + getSkewedColNamesBytes(int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + java.util.List + getSkewedColValuesList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index); + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + int getSkewedColValuesCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + java.util.List + getSkewedColValuesOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + java.util.List + getSkewedColValueLocationMapsList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + int getSkewedColValueLocationMapsCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + java.util.List + getSkewedColValueLocationMapsOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} + */ + public static final class SkewedInfo extends + com.google.protobuf.GeneratedMessage + implements SkewedInfoOrBuilder { + // Use SkewedInfo.newBuilder() to construct. 
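(Aside for readers of this patch, not part of the generated HbaseMetastoreProto.java: the nested messages above follow the standard protobuf 2.5 builder pattern, so metastore code that stores a serde descriptor in HBase would build and serialize one roughly as in the sketch below. The newBuilder()/setter/build() calls are the ones visible in the generated SerDeInfo code above; the wrapper class name and the sample values are hypothetical.)

// Hypothetical usage sketch only -- not part of HbaseMetastoreProto.java or of this patch.
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

public class SerDeInfoProtoExample {
  public static byte[] buildAndSerialize() {
    // Populate the generated SerDeInfo message through its Builder; setName,
    // setSerializationLib and setParameters are defined in the code above.
    HbaseMetastoreProto.StorageDescriptor.SerDeInfo serde =
        HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder()
            .setName("example-serde")
            .setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")
            .setParameters(HbaseMetastoreProto.Parameters.getDefaultInstance())
            .build();
    // toByteArray() yields the wire-format bytes that the HBase-backed metastore
    // would persist for this part of the storage descriptor.
    return serde.toByteArray();
  }
}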
+ private SkewedInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedInfo defaultInstance; + public static SkewedInfo getDefaultInstance() { + return defaultInstance; + } + + public SkewedInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + skewedColNames_.add(input.readBytes()); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + skewedColValues_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + skewedColValueLocationMaps_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColNames_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface SkewedColValueListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string skewed_col_value = 1; + /** + * repeated string skewed_col_value = 1; + */ + java.util.List + getSkewedColValueList(); + /** + * repeated string skewed_col_value = 1; + */ + int getSkewedColValueCount(); + /** + * repeated string skewed_col_value = 1; + */ + java.lang.String getSkewedColValue(int index); + /** + * repeated string skewed_col_value = 1; + */ + com.google.protobuf.ByteString + getSkewedColValueBytes(int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} + */ + public static final class SkewedColValueList extends + com.google.protobuf.GeneratedMessage + implements SkewedColValueListOrBuilder { + // Use SkewedColValueList.newBuilder() to construct. + private SkewedColValueList(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedColValueList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedColValueList defaultInstance; + public static SkewedColValueList getDefaultInstance() { + return defaultInstance; + } + + public SkewedColValueList getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedColValueList( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + skewedColValue_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.UnmodifiableLazyStringList(skewedColValue_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedColValueList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedColValueList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated string skewed_col_value = 1; + public static final int SKEWED_COL_VALUE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList skewedColValue_; + /** + * repeated string skewed_col_value = 1; + */ + public java.util.List + getSkewedColValueList() { + return skewedColValue_; + } + /** + * repeated string skewed_col_value = 1; + */ + public int getSkewedColValueCount() { + return skewedColValue_.size(); + } + /** + * repeated string skewed_col_value = 1; + */ + public java.lang.String getSkewedColValue(int index) { + return skewedColValue_.get(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public com.google.protobuf.ByteString + getSkewedColValueBytes(int index) { + return skewedColValue_.getByteString(index); + } + + private void initFields() { + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < skewedColValue_.size(); i++) { + output.writeBytes(1, skewedColValue_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < skewedColValue_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(skewedColValue_.getByteString(i)); + } + size += dataSize; + size += 1 * getSkewedColValueList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private 
static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new 
com.google.protobuf.UnmodifiableLazyStringList( + skewedColValue_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.skewedColValue_ = skewedColValue_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()) return this; + if (!other.skewedColValue_.isEmpty()) { + if (skewedColValue_.isEmpty()) { + skewedColValue_ = other.skewedColValue_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSkewedColValueIsMutable(); + skewedColValue_.addAll(other.skewedColValue_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string skewed_col_value = 1; + private com.google.protobuf.LazyStringList skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureSkewedColValueIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColValue_ = new com.google.protobuf.LazyStringArrayList(skewedColValue_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string skewed_col_value = 1; + */ + public java.util.List + getSkewedColValueList() { + return java.util.Collections.unmodifiableList(skewedColValue_); + } + /** + * repeated string skewed_col_value = 1; + */ + public int getSkewedColValueCount() { + return skewedColValue_.size(); + } + /** + * repeated string skewed_col_value = 1; + */ + public java.lang.String getSkewedColValue(int index) { + return skewedColValue_.get(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public com.google.protobuf.ByteString + getSkewedColValueBytes(int index) { + return skewedColValue_.getByteString(index); + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder setSkewedColValue( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueIsMutable(); + skewedColValue_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addSkewedColValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + 
ensureSkewedColValueIsMutable(); + skewedColValue_.add(value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addAllSkewedColValue( + java.lang.Iterable values) { + ensureSkewedColValueIsMutable(); + super.addAll(values, skewedColValue_); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder clearSkewedColValue() { + skewedColValue_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string skewed_col_value = 1; + */ + public Builder addSkewedColValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueIsMutable(); + skewedColValue_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) + } + + static { + defaultInstance = new SkewedColValueList(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList) + } + + public interface SkewedColValueLocationMapOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated string key = 1; + /** + * repeated string key = 1; + */ + java.util.List + getKeyList(); + /** + * repeated string key = 1; + */ + int getKeyCount(); + /** + * repeated string key = 1; + */ + java.lang.String getKey(int index); + /** + * repeated string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(int index); + + // required string value = 2; + /** + * required string value = 2; + */ + boolean hasValue(); + /** + * required string value = 2; + */ + java.lang.String getValue(); + /** + * required string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} + */ + public static final class SkewedColValueLocationMap extends + com.google.protobuf.GeneratedMessage + implements SkewedColValueLocationMapOrBuilder { + // Use SkewedColValueLocationMap.newBuilder() to construct. 
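(Aside, not part of the patch: protobuf 2.5 has no map type, so the skewed-value-to-location map of a storage descriptor is flattened into repeated SkewedColValueLocationMap entries, each holding the skewed column values as a repeated key plus the location as a required value, as the interface just above shows. A minimal sketch of building one such entry follows; addKey appears in the generated Builder further below, while setValue is the usual protoc-generated setter for the required field and is assumed here, as are the sample values.)

// Hypothetical sketch only -- not part of HbaseMetastoreProto.java or of this patch.
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap;

public class SkewedLocationEntryExample {
  public static SkewedColValueLocationMap singleEntry() {
    return SkewedColValueLocationMap.newBuilder()
        .addKey("2015-01-01")                        // one skewed column value (sample)
        .setValue("hdfs://nn/warehouse/t/skew_dir")  // its location (hypothetical path)
        .build();
  }
}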
+ private SkewedColValueLocationMap(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SkewedColValueLocationMap(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SkewedColValueLocationMap defaultInstance; + public static SkewedColValueLocationMap getDefaultInstance() { + return defaultInstance; + } + + public SkewedColValueLocationMap getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SkewedColValueLocationMap( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + key_.add(input.readBytes()); + break; + } + case 18: { + bitField0_ |= 0x00000001; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.UnmodifiableLazyStringList(key_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SkewedColValueLocationMap parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SkewedColValueLocationMap(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int 
bitField0_; + // repeated string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList key_; + /** + * repeated string key = 1; + */ + public java.util.List + getKeyList() { + return key_; + } + /** + * repeated string key = 1; + */ + public int getKeyCount() { + return key_.size(); + } + /** + * repeated string key = 1; + */ + public java.lang.String getKey(int index) { + return key_.get(index); + } + /** + * repeated string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes(int index) { + return key_.getByteString(index); + } + + // required string value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + value_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < key_.size(); i++) { + output.writeBytes(1, key_.getByteString(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < key_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(key_.getByteString(i)); + } + size += dataSize; + size += 1 * getKeyList().size(); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.UnmodifiableLazyStringList( + key_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + 
result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()) return this; + if (!other.key_.isEmpty()) { + if (key_.isEmpty()) { + key_ = other.key_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureKeyIsMutable(); + key_.addAll(other.key_); + } + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string key = 1; + private com.google.protobuf.LazyStringList key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureKeyIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + key_ = new com.google.protobuf.LazyStringArrayList(key_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string key = 1; + */ + public java.util.List + getKeyList() { + return java.util.Collections.unmodifiableList(key_); + } + /** + * repeated string key = 1; + */ + public int getKeyCount() { + return key_.size(); + } + /** + * repeated string key = 1; + */ + public java.lang.String getKey(int index) { + return key_.get(index); + } + /** + * repeated string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes(int index) { + return key_.getByteString(index); + } + /** + * repeated string key = 1; + */ + public Builder setKey( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.add(value); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addAllKey( + java.lang.Iterable values) { + ensureKeyIsMutable(); + super.addAll(values, key_); + onChanged(); + return this; + } + /** + * repeated 
string key = 1; + */ + public Builder clearKey() { + key_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string key = 1; + */ + public Builder addKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKeyIsMutable(); + key_.add(value); + onChanged(); + return this; + } + + // required string value = 2; + private java.lang.Object value_ = ""; + /** + * required string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) + } + + static { + defaultInstance = new SkewedColValueLocationMap(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap) + } + + // repeated string skewed_col_names = 1; + public static final int SKEWED_COL_NAMES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList skewedColNames_; + /** + * repeated string skewed_col_names = 1; + */ + public java.util.List + getSkewedColNamesList() { + return skewedColNames_; + } + /** + * repeated string skewed_col_names = 1; + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + /** + * repeated string skewed_col_names = 1; + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public com.google.protobuf.ByteString + getSkewedColNamesBytes(int index) { + return skewedColNames_.getByteString(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + public static final int SKEWED_COL_VALUES_FIELD_NUMBER = 2; + private java.util.List skewedColValues_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public 
java.util.List getSkewedColValuesList() { + return skewedColValues_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesOrBuilderList() { + return skewedColValues_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public int getSkewedColValuesCount() { + return skewedColValues_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { + return skewedColValues_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index) { + return skewedColValues_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + public static final int SKEWED_COL_VALUE_LOCATION_MAPS_FIELD_NUMBER = 3; + private java.util.List skewedColValueLocationMaps_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List getSkewedColValueLocationMapsList() { + return skewedColValueLocationMaps_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsOrBuilderList() { + return skewedColValueLocationMaps_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public int getSkewedColValueLocationMapsCount() { + return skewedColValueLocationMaps_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { + return skewedColValueLocationMaps_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index) { + return skewedColValueLocationMaps_.get(index); + } + + private void initFields() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + skewedColValues_ = java.util.Collections.emptyList(); + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { + if (!getSkewedColValueLocationMaps(i).isInitialized()) { 
+ memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < skewedColNames_.size(); i++) { + output.writeBytes(1, skewedColNames_.getByteString(i)); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + output.writeMessage(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { + output.writeMessage(3, skewedColValueLocationMaps_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < skewedColNames_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(skewedColNames_.getByteString(i)); + } + size += dataSize; + size += 1 * getSkewedColNamesList().size(); + } + for (int i = 0; i < skewedColValues_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, skewedColValues_.get(i)); + } + for (int i = 0; i < skewedColValueLocationMaps_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, skewedColValueLocationMaps_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public 
static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSkewedColValuesFieldBuilder(); + getSkewedColValueLocationMapsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + skewedColValuesBuilder_.clear(); + } + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); 
+ bitField0_ = (bitField0_ & ~0x00000004); + } else { + skewedColValueLocationMapsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.UnmodifiableLazyStringList( + skewedColNames_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.skewedColNames_ = skewedColNames_; + if (skewedColValuesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = java.util.Collections.unmodifiableList(skewedColValues_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.skewedColValues_ = skewedColValues_; + } else { + result.skewedColValues_ = skewedColValuesBuilder_.build(); + } + if (skewedColValueLocationMapsBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.skewedColValueLocationMaps_ = skewedColValueLocationMaps_; + } else { + result.skewedColValueLocationMaps_ = skewedColValueLocationMapsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) return this; + if (!other.skewedColNames_.isEmpty()) { + if (skewedColNames_.isEmpty()) { + skewedColNames_ = other.skewedColNames_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSkewedColNamesIsMutable(); + skewedColNames_.addAll(other.skewedColNames_); + } + onChanged(); + } + if (skewedColValuesBuilder_ == null) { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValues_.isEmpty()) { + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + 
ensureSkewedColValuesIsMutable(); + skewedColValues_.addAll(other.skewedColValues_); + } + onChanged(); + } + } else { + if (!other.skewedColValues_.isEmpty()) { + if (skewedColValuesBuilder_.isEmpty()) { + skewedColValuesBuilder_.dispose(); + skewedColValuesBuilder_ = null; + skewedColValues_ = other.skewedColValues_; + bitField0_ = (bitField0_ & ~0x00000002); + skewedColValuesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getSkewedColValuesFieldBuilder() : null; + } else { + skewedColValuesBuilder_.addAllMessages(other.skewedColValues_); + } + } + } + if (skewedColValueLocationMapsBuilder_ == null) { + if (!other.skewedColValueLocationMaps_.isEmpty()) { + if (skewedColValueLocationMaps_.isEmpty()) { + skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.addAll(other.skewedColValueLocationMaps_); + } + onChanged(); + } + } else { + if (!other.skewedColValueLocationMaps_.isEmpty()) { + if (skewedColValueLocationMapsBuilder_.isEmpty()) { + skewedColValueLocationMapsBuilder_.dispose(); + skewedColValueLocationMapsBuilder_ = null; + skewedColValueLocationMaps_ = other.skewedColValueLocationMaps_; + bitField0_ = (bitField0_ & ~0x00000004); + skewedColValueLocationMapsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getSkewedColValueLocationMapsFieldBuilder() : null; + } else { + skewedColValueLocationMapsBuilder_.addAllMessages(other.skewedColValueLocationMaps_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getSkewedColValueLocationMapsCount(); i++) { + if (!getSkewedColValueLocationMaps(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated string skewed_col_names = 1; + private com.google.protobuf.LazyStringList skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureSkewedColNamesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + skewedColNames_ = new com.google.protobuf.LazyStringArrayList(skewedColNames_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string skewed_col_names = 1; + */ + public java.util.List + getSkewedColNamesList() { + return java.util.Collections.unmodifiableList(skewedColNames_); + } + /** + * repeated string skewed_col_names = 1; + */ + public int getSkewedColNamesCount() { + return skewedColNames_.size(); + } + /** + * repeated string skewed_col_names = 1; + */ + public java.lang.String getSkewedColNames(int index) { + return skewedColNames_.get(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public com.google.protobuf.ByteString + getSkewedColNamesBytes(int index) { + return 
skewedColNames_.getByteString(index); + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder setSkewedColNames( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addSkewedColNames( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addAllSkewedColNames( + java.lang.Iterable values) { + ensureSkewedColNamesIsMutable(); + super.addAll(values, skewedColNames_); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder clearSkewedColNames() { + skewedColNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string skewed_col_names = 1; + */ + public Builder addSkewedColNamesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColNamesIsMutable(); + skewedColNames_.add(value); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + private java.util.List skewedColValues_ = + java.util.Collections.emptyList(); + private void ensureSkewedColValuesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + skewedColValues_ = new java.util.ArrayList(skewedColValues_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> skewedColValuesBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List getSkewedColValuesList() { + if (skewedColValuesBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedColValues_); + } else { + return skewedColValuesBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public int getSkewedColValuesCount() { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.size(); + } else { + return skewedColValuesBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList getSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); + } else { + return skewedColValuesBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder setSkewedColValues( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder setSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList value) { + if (skewedColValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, value); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addSkewedColValues( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder builderForValue) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedColValuesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder addAllSkewedColValues( + java.lang.Iterable values) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + super.addAll(values, skewedColValues_); + onChanged(); 
+ } else { + skewedColValuesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder clearSkewedColValues() { + if (skewedColValuesBuilder_ == null) { + skewedColValues_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + skewedColValuesBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public Builder removeSkewedColValues(int index) { + if (skewedColValuesBuilder_ == null) { + ensureSkewedColValuesIsMutable(); + skewedColValues_.remove(index); + onChanged(); + } else { + skewedColValuesBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder getSkewedColValuesBuilder( + int index) { + return getSkewedColValuesFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder getSkewedColValuesOrBuilder( + int index) { + if (skewedColValuesBuilder_ == null) { + return skewedColValues_.get(index); } else { + return skewedColValuesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesOrBuilderList() { + if (skewedColValuesBuilder_ != null) { + return skewedColValuesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedColValues_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder() { + return getSkewedColValuesFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder addSkewedColValuesBuilder( + int index) { + return getSkewedColValuesFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueList skewed_col_values = 2; + */ + public java.util.List + getSkewedColValuesBuilderList() { + return getSkewedColValuesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder> + getSkewedColValuesFieldBuilder() { + if (skewedColValuesBuilder_ == null) { + skewedColValuesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueListOrBuilder>( + skewedColValues_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + skewedColValues_ = null; + } + return skewedColValuesBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + private java.util.List skewedColValueLocationMaps_ = + java.util.Collections.emptyList(); + private void ensureSkewedColValueLocationMapsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + skewedColValueLocationMaps_ = new java.util.ArrayList(skewedColValueLocationMaps_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> skewedColValueLocationMapsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List getSkewedColValueLocationMapsList() { + if (skewedColValueLocationMapsBuilder_ == null) { + return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } else { + return skewedColValueLocationMapsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public int getSkewedColValueLocationMapsCount() { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.size(); + } else { + return skewedColValueLocationMapsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap getSkewedColValueLocationMaps(int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.get(index); + } else { + return skewedColValueLocationMapsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder setSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == 
null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.set(index, value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder setSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.set(index, builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap value) { + if (skewedColValueLocationMapsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(index, value); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addSkewedColValueLocationMaps( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder builderForValue) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.add(index, builderForValue.build()); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder addAllSkewedColValueLocationMaps( + java.lang.Iterable values) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + super.addAll(values, skewedColValueLocationMaps_); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder clearSkewedColValueLocationMaps() { + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMaps_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public Builder removeSkewedColValueLocationMaps(int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + ensureSkewedColValueLocationMapsIsMutable(); + skewedColValueLocationMaps_.remove(index); + onChanged(); + } else { + skewedColValueLocationMapsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder getSkewedColValueLocationMapsBuilder( + int index) { + return getSkewedColValueLocationMapsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder getSkewedColValueLocationMapsOrBuilder( + int index) { + if (skewedColValueLocationMapsBuilder_ == null) { + return skewedColValueLocationMaps_.get(index); } else { + return skewedColValueLocationMapsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsOrBuilderList() { + if (skewedColValueLocationMapsBuilder_ != null) { + return skewedColValueLocationMapsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(skewedColValueLocationMaps_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder() { + return getSkewedColValueLocationMapsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder addSkewedColValueLocationMapsBuilder( + int index) { + return getSkewedColValueLocationMapsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap skewed_col_value_location_maps = 3; + */ + public java.util.List + getSkewedColValueLocationMapsBuilderList() { + return getSkewedColValueLocationMapsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder> + getSkewedColValueLocationMapsFieldBuilder() { + if (skewedColValueLocationMapsBuilder_ == null) { + skewedColValueLocationMapsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMapOrBuilder>( + skewedColValueLocationMaps_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + skewedColValueLocationMaps_ = null; + } + return skewedColValueLocationMapsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) + } + + static { + defaultInstance = new SkewedInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo) + } + + private int bitField0_; + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + public static final int COLS_FIELD_NUMBER = 1; + private java.util.List cols_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List getColsList() { + return cols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsOrBuilderList() { + return cols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public int getColsCount() { + return cols_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { + return cols_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index) { + return cols_.get(index); + } + + // optional string input_format = 2; + public static final int INPUT_FORMAT_FIELD_NUMBER = 2; + private java.lang.Object inputFormat_; + /** + * optional string input_format = 2; + */ + public boolean hasInputFormat() { + return ((bitField0_ & 0x00000001) == 0x00000001); 
+ } + /** + * optional string input_format = 2; + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + inputFormat_ = s; + } + return s; + } + } + /** + * optional string input_format = 2; + */ + public com.google.protobuf.ByteString + getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string output_format = 3; + public static final int OUTPUT_FORMAT_FIELD_NUMBER = 3; + private java.lang.Object outputFormat_; + /** + * optional string output_format = 3; + */ + public boolean hasOutputFormat() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string output_format = 3; + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + outputFormat_ = s; + } + return s; + } + } + /** + * optional string output_format = 3; + */ + public com.google.protobuf.ByteString + getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bool is_compressed = 4; + public static final int IS_COMPRESSED_FIELD_NUMBER = 4; + private boolean isCompressed_; + /** + * optional bool is_compressed = 4; + */ + public boolean hasIsCompressed() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool is_compressed = 4; + */ + public boolean getIsCompressed() { + return isCompressed_; + } + + // optional sint32 num_buckets = 5; + public static final int NUM_BUCKETS_FIELD_NUMBER = 5; + private int numBuckets_; + /** + * optional sint32 num_buckets = 5; + */ + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional sint32 num_buckets = 5; + */ + public int getNumBuckets() { + return numBuckets_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + public static final int SERDE_INFO_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { + return serdeInfo_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder getSerdeInfoOrBuilder() { + return serdeInfo_; + } + + // repeated string bucket_cols = 7; + public static final int BUCKET_COLS_FIELD_NUMBER = 7; + private com.google.protobuf.LazyStringList bucketCols_; + /** + * repeated string bucket_cols = 7; + */ + public java.util.List + getBucketColsList() { + return bucketCols_; + } + /** + * repeated string bucket_cols = 7; + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + /** + * repeated string bucket_cols = 7; + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + /** + * repeated string bucket_cols = 7; + */ + public com.google.protobuf.ByteString + getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + public static final int SORT_COLS_FIELD_NUMBER = 8; + private java.util.List sortCols_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List getSortColsList() { + return sortCols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsOrBuilderList() { + return sortCols_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public int getSortColsCount() { + return sortCols_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { + return sortCols_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index) { + return sortCols_.get(index); + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + public static final int SKEWED_INFO_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { + return skewedInfo_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { + return skewedInfo_; + } + + // optional bool stored_as_sub_directories = 10; + public static final int STORED_AS_SUB_DIRECTORIES_FIELD_NUMBER = 10; + private boolean storedAsSubDirectories_; + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean hasStoredAsSubDirectories() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean getStoredAsSubDirectories() { + return storedAsSubDirectories_; + } + + 
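// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the generated HbaseMetastoreProto
// code in this patch. It shows how the SkewedInfo / SkewedColValueLocationMap
// builders and the parseFrom overloads emitted above might be exercised.
// The column name "ds", the skew value, the location path, and the wrapper
// class SkewedInfoRoundTripExample are assumptions made for the example; only
// the generated message/builder methods come from the patch itself.
// ---------------------------------------------------------------------------
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo;

public class SkewedInfoRoundTripExample {
  public static void main(String[] args) throws Exception {
    // Map one skewed column value to the directory that stores its rows.
    SkewedInfo.SkewedColValueLocationMap locationMap =
        SkewedInfo.SkewedColValueLocationMap.newBuilder()
            .addKey("2015-01-01")                      // repeated string key = 1
            .setValue("/warehouse/tbl/ds=2015-01-01")  // required string value = 2
            .build();

    // Attach the skewed column name and the location map to a SkewedInfo message.
    SkewedInfo skewedInfo = SkewedInfo.newBuilder()
        .addSkewedColNames("ds")
        .addSkewedColValueLocationMaps(locationMap)
        .build();

    // Round-trip through the wire format using the generated parseFrom overload.
    byte[] bytes = skewedInfo.toByteArray();
    SkewedInfo copy = SkewedInfo.parseFrom(bytes);
    System.out.println(copy.getSkewedColValueLocationMaps(0).getValue());
  }
}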
private void initFields() { + cols_ = java.util.Collections.emptyList(); + inputFormat_ = ""; + outputFormat_ = ""; + isCompressed_ = false; + numBuckets_ = 0; + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + sortCols_ = java.util.Collections.emptyList(); + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + storedAsSubDirectories_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getColsCount(); i++) { + if (!getCols(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasSerdeInfo()) { + if (!getSerdeInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getSortColsCount(); i++) { + if (!getSortCols(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasSkewedInfo()) { + if (!getSkewedInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < cols_.size(); i++) { + output.writeMessage(1, cols_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(2, getInputFormatBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getOutputFormatBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(4, isCompressed_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeSInt32(5, numBuckets_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(6, serdeInfo_); + } + for (int i = 0; i < bucketCols_.size(); i++) { + output.writeBytes(7, bucketCols_.getByteString(i)); + } + for (int i = 0; i < sortCols_.size(); i++) { + output.writeMessage(8, sortCols_.get(i)); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(9, skewedInfo_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBool(10, storedAsSubDirectories_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < cols_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, cols_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getInputFormatBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getOutputFormatBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, isCompressed_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt32Size(5, numBuckets_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, serdeInfo_); + } + { + int dataSize = 0; + for (int i = 0; i < bucketCols_.size(); i++) { + dataSize 
+= com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(bucketCols_.getByteString(i)); + } + size += dataSize; + size += 1 * getBucketColsList().size(); + } + for (int i = 0; i < sortCols_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, sortCols_.get(i)); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, skewedInfo_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(10, storedAsSubDirectories_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getColsFieldBuilder(); + getSerdeInfoFieldBuilder(); + getSortColsFieldBuilder(); + getSkewedInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (colsBuilder_ == null) { + cols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + colsBuilder_.clear(); + } + inputFormat_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + outputFormat_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + isCompressed_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + numBuckets_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + if (serdeInfoBuilder_ == null) { + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + } else { + serdeInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + sortColsBuilder_.clear(); + } + if (skewedInfoBuilder_ == null) { + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + } else { + skewedInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + storedAsSubDirectories_ = false; + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
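Worth noting before the merge logic that follows: mergeFrom(other) appends other's repeated fields (cols, bucket_cols, sort_cols) to the builder's current contents and overwrites any optional field that is set in other, while newBuilder(prototype) is simply newBuilder().mergeFrom(prototype). A small sketch of the observable behaviour (illustration only, not part of the generated file; the field values are made up):

    StorageDescriptor base = StorageDescriptor.newBuilder()
        .setNumBuckets(2)
        .addBucketCols("a")
        .build();
    StorageDescriptor overlay = StorageDescriptor.newBuilder()
        .setNumBuckets(8)
        .addBucketCols("b")
        .build();

    // Seed a builder from 'base', then merge 'overlay' into it.
    StorageDescriptor merged = StorageDescriptor.newBuilder(base)
        .mergeFrom(overlay)
        .build();

    // merged.getNumBuckets()     == 8            (optional field overwritten)
    // merged.getBucketColsList() == ["a", "b"]   (repeated field concatenated)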
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (colsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = java.util.Collections.unmodifiableList(cols_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.cols_ = cols_; + } else { + result.cols_ = colsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + result.inputFormat_ = inputFormat_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.outputFormat_ = outputFormat_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.isCompressed_ = isCompressed_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.numBuckets_ = numBuckets_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + if (serdeInfoBuilder_ == null) { + result.serdeInfo_ = serdeInfo_; + } else { + result.serdeInfo_ = serdeInfoBuilder_.build(); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.UnmodifiableLazyStringList( + bucketCols_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.bucketCols_ = bucketCols_; + if (sortColsBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + sortCols_ = java.util.Collections.unmodifiableList(sortCols_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.sortCols_ = sortCols_; + } else { + result.sortCols_ = sortColsBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000020; + } + if (skewedInfoBuilder_ == null) { + result.skewedInfo_ = skewedInfo_; + } else { + result.skewedInfo_ = skewedInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000040; + } + result.storedAsSubDirectories_ = storedAsSubDirectories_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor other) { + if (other == 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.getDefaultInstance()) return this; + if (colsBuilder_ == null) { + if (!other.cols_.isEmpty()) { + if (cols_.isEmpty()) { + cols_ = other.cols_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureColsIsMutable(); + cols_.addAll(other.cols_); + } + onChanged(); + } + } else { + if (!other.cols_.isEmpty()) { + if (colsBuilder_.isEmpty()) { + colsBuilder_.dispose(); + colsBuilder_ = null; + cols_ = other.cols_; + bitField0_ = (bitField0_ & ~0x00000001); + colsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getColsFieldBuilder() : null; + } else { + colsBuilder_.addAllMessages(other.cols_); + } + } + } + if (other.hasInputFormat()) { + bitField0_ |= 0x00000002; + inputFormat_ = other.inputFormat_; + onChanged(); + } + if (other.hasOutputFormat()) { + bitField0_ |= 0x00000004; + outputFormat_ = other.outputFormat_; + onChanged(); + } + if (other.hasIsCompressed()) { + setIsCompressed(other.getIsCompressed()); + } + if (other.hasNumBuckets()) { + setNumBuckets(other.getNumBuckets()); + } + if (other.hasSerdeInfo()) { + mergeSerdeInfo(other.getSerdeInfo()); + } + if (!other.bucketCols_.isEmpty()) { + if (bucketCols_.isEmpty()) { + bucketCols_ = other.bucketCols_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureBucketColsIsMutable(); + bucketCols_.addAll(other.bucketCols_); + } + onChanged(); + } + if (sortColsBuilder_ == null) { + if (!other.sortCols_.isEmpty()) { + if (sortCols_.isEmpty()) { + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureSortColsIsMutable(); + sortCols_.addAll(other.sortCols_); + } + onChanged(); + } + } else { + if (!other.sortCols_.isEmpty()) { + if (sortColsBuilder_.isEmpty()) { + sortColsBuilder_.dispose(); + sortColsBuilder_ = null; + sortCols_ = other.sortCols_; + bitField0_ = (bitField0_ & ~0x00000080); + sortColsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getSortColsFieldBuilder() : null; + } else { + sortColsBuilder_.addAllMessages(other.sortCols_); + } + } + } + if (other.hasSkewedInfo()) { + mergeSkewedInfo(other.getSkewedInfo()); + } + if (other.hasStoredAsSubDirectories()) { + setStoredAsSubDirectories(other.getStoredAsSubDirectories()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getColsCount(); i++) { + if (!getCols(i).isInitialized()) { + + return false; + } + } + if (hasSerdeInfo()) { + if (!getSerdeInfo().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getSortColsCount(); i++) { + if (!getSortCols(i).isInitialized()) { + + return false; + } + } + if (hasSkewedInfo()) { + if (!getSkewedInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + private java.util.List cols_ = + java.util.Collections.emptyList(); + private void ensureColsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + cols_ = new java.util.ArrayList(cols_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> colsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List getColsList() { + if (colsBuilder_ == null) { + return java.util.Collections.unmodifiableList(cols_); + } else { + return colsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public int getColsCount() { + if (colsBuilder_ == null) { + return cols_.size(); + } else { + return colsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getCols(int index) { + if (colsBuilder_ == null) { + return cols_.get(index); + } else { + return colsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder setCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.set(index, value); + onChanged(); + } else { + colsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder setCols( + int index, 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.set(index, builderForValue.build()); + onChanged(); + } else { + colsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.add(value); + onChanged(); + } else { + colsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (colsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColsIsMutable(); + cols_.add(index, value); + onChanged(); + } else { + colsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.add(builderForValue.build()); + onChanged(); + } else { + colsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.add(index, builderForValue.build()); + onChanged(); + } else { + colsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder addAllCols( + java.lang.Iterable values) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + super.addAll(values, cols_); + onChanged(); + } else { + colsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder clearCols() { + if (colsBuilder_ == null) { + cols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + colsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public Builder removeCols(int index) { + if (colsBuilder_ == null) { + ensureColsIsMutable(); + cols_.remove(index); + onChanged(); + } else { + colsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getColsBuilder( + int index) { + return getColsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getColsOrBuilder( + int index) { + if (colsBuilder_ == null) { + return cols_.get(index); } else { + return colsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsOrBuilderList() { + if (colsBuilder_ != null) { + return colsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(cols_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder() { + return getColsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addColsBuilder( + int index) { + return getColsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema cols = 1; + */ + public java.util.List + getColsBuilderList() { + return getColsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> + getColsFieldBuilder() { + if (colsBuilder_ == null) { + colsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( + cols_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + cols_ = null; + } + return colsBuilder_; + } + + // optional string input_format = 2; + private java.lang.Object inputFormat_ = ""; + /** + * optional string input_format = 2; + */ + public boolean hasInputFormat() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string input_format = 2; + */ + public java.lang.String getInputFormat() { + java.lang.Object ref = inputFormat_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + inputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string input_format = 2; + */ + public com.google.protobuf.ByteString + getInputFormatBytes() { + java.lang.Object ref = inputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + inputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string input_format = 2; + */ + public Builder setInputFormat( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + inputFormat_ = value; + onChanged(); + return this; + } + /** + * optional string input_format = 2; + */ + public Builder clearInputFormat() { + bitField0_ = (bitField0_ & ~0x00000002); + inputFormat_ = getDefaultInstance().getInputFormat(); + onChanged(); + return this; + } + /** + * optional string input_format = 2; + */ + public Builder setInputFormatBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000002; + inputFormat_ = value; + onChanged(); + return this; + } + + // optional string output_format = 3; + private java.lang.Object outputFormat_ = ""; + /** + * optional string output_format = 3; + */ + public boolean hasOutputFormat() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string output_format = 3; + */ + public java.lang.String getOutputFormat() { + java.lang.Object ref = outputFormat_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + outputFormat_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string output_format = 3; + */ + public com.google.protobuf.ByteString + getOutputFormatBytes() { + java.lang.Object ref = outputFormat_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + outputFormat_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string output_format = 3; + */ + public Builder setOutputFormat( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + outputFormat_ = value; + onChanged(); + return this; + } + /** + * optional string output_format = 3; + */ + public Builder clearOutputFormat() { + bitField0_ = (bitField0_ & ~0x00000004); + outputFormat_ = getDefaultInstance().getOutputFormat(); + onChanged(); + return this; + } + /** + * optional string output_format = 3; + */ + public Builder setOutputFormatBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + outputFormat_ = value; + onChanged(); + return this; + } + + // optional bool is_compressed = 4; + private boolean isCompressed_ ; + /** + * optional bool is_compressed = 4; + */ + public boolean hasIsCompressed() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool is_compressed = 4; + */ + public boolean getIsCompressed() { + return isCompressed_; + } + /** + * optional bool is_compressed = 4; + */ + public Builder setIsCompressed(boolean value) { + bitField0_ |= 0x00000008; + isCompressed_ = value; + onChanged(); + return this; + } + /** + * optional bool is_compressed = 4; + */ + public Builder clearIsCompressed() { + bitField0_ = (bitField0_ & ~0x00000008); + isCompressed_ = false; + onChanged(); + return this; + } + + // optional sint32 num_buckets = 5; + private int numBuckets_ ; + /** + * optional sint32 num_buckets = 5; + */ + public boolean hasNumBuckets() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional sint32 num_buckets = 5; + */ + public int getNumBuckets() { + return numBuckets_; + } + /** + * optional sint32 num_buckets = 5; + */ + public Builder setNumBuckets(int value) { + bitField0_ |= 0x00000010; + numBuckets_ = value; + onChanged(); + return this; + } + /** + * optional sint32 num_buckets = 5; + */ + public Builder clearNumBuckets() { + bitField0_ = (bitField0_ & ~0x00000010); + numBuckets_ = 0; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + 
private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> serdeInfoBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public boolean hasSerdeInfo() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo getSerdeInfo() { + if (serdeInfoBuilder_ == null) { + return serdeInfo_; + } else { + return serdeInfoBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder setSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serdeInfo_ = value; + onChanged(); + } else { + serdeInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder setSerdeInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder builderForValue) { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = builderForValue.build(); + onChanged(); + } else { + serdeInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder mergeSerdeInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo value) { + if (serdeInfoBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + serdeInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance()) { + serdeInfo_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(serdeInfo_).mergeFrom(value).buildPartial(); + } else { + serdeInfo_ = value; + } + onChanged(); + } else { + serdeInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public Builder clearSerdeInfo() { + if (serdeInfoBuilder_ == null) { + serdeInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.getDefaultInstance(); + onChanged(); + } else { + serdeInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder getSerdeInfoBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getSerdeInfoFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder 
getSerdeInfoOrBuilder() { + if (serdeInfoBuilder_ != null) { + return serdeInfoBuilder_.getMessageOrBuilder(); + } else { + return serdeInfo_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SerDeInfo serde_info = 6; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder> + getSerdeInfoFieldBuilder() { + if (serdeInfoBuilder_ == null) { + serdeInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SerDeInfoOrBuilder>( + serdeInfo_, + getParentForChildren(), + isClean()); + serdeInfo_ = null; + } + return serdeInfoBuilder_; + } + + // repeated string bucket_cols = 7; + private com.google.protobuf.LazyStringList bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureBucketColsIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + bucketCols_ = new com.google.protobuf.LazyStringArrayList(bucketCols_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated string bucket_cols = 7; + */ + public java.util.List + getBucketColsList() { + return java.util.Collections.unmodifiableList(bucketCols_); + } + /** + * repeated string bucket_cols = 7; + */ + public int getBucketColsCount() { + return bucketCols_.size(); + } + /** + * repeated string bucket_cols = 7; + */ + public java.lang.String getBucketCols(int index) { + return bucketCols_.get(index); + } + /** + * repeated string bucket_cols = 7; + */ + public com.google.protobuf.ByteString + getBucketColsBytes(int index) { + return bucketCols_.getByteString(index); + } + /** + * repeated string bucket_cols = 7; + */ + public Builder setBucketCols( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addBucketCols( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.add(value); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addAllBucketCols( + java.lang.Iterable values) { + ensureBucketColsIsMutable(); + super.addAll(values, bucketCols_); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder clearBucketCols() { + bucketCols_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + /** + * repeated string bucket_cols = 7; + */ + public Builder addBucketColsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketColsIsMutable(); + bucketCols_.add(value); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + private java.util.List sortCols_ = + java.util.Collections.emptyList(); + private void ensureSortColsIsMutable() { + if (!((bitField0_ 
& 0x00000080) == 0x00000080)) { + sortCols_ = new java.util.ArrayList(sortCols_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> sortColsBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List getSortColsList() { + if (sortColsBuilder_ == null) { + return java.util.Collections.unmodifiableList(sortCols_); + } else { + return sortColsBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public int getSortColsCount() { + if (sortColsBuilder_ == null) { + return sortCols_.size(); + } else { + return sortColsBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order getSortCols(int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); + } else { + return sortColsBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder setSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.set(index, value); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder setSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.set(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(value); + onChanged(); + } else { + sortColsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order value) { + if (sortColsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureSortColsIsMutable(); + sortCols_.add(index, value); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if 
(sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addSortCols( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder builderForValue) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.add(index, builderForValue.build()); + onChanged(); + } else { + sortColsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder addAllSortCols( + java.lang.Iterable values) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + super.addAll(values, sortCols_); + onChanged(); + } else { + sortColsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder clearSortCols() { + if (sortColsBuilder_ == null) { + sortCols_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + sortColsBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public Builder removeSortCols(int index) { + if (sortColsBuilder_ == null) { + ensureSortColsIsMutable(); + sortCols_.remove(index); + onChanged(); + } else { + sortColsBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder getSortColsBuilder( + int index) { + return getSortColsFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder getSortColsOrBuilder( + int index) { + if (sortColsBuilder_ == null) { + return sortCols_.get(index); } else { + return sortColsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsOrBuilderList() { + if (sortColsBuilder_ != null) { + return sortColsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(sortCols_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder() { + return getSortColsFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder addSortColsBuilder( + int index) { + return getSortColsFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.getDefaultInstance()); + } + /** + * repeated 
.org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order sort_cols = 8; + */ + public java.util.List + getSortColsBuilderList() { + return getSortColsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder> + getSortColsFieldBuilder() { + if (sortColsBuilder_ == null) { + sortColsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.Order.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.OrderOrBuilder>( + sortCols_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + sortCols_ = null; + } + return sortColsBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> skewedInfoBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public boolean hasSkewedInfo() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo getSkewedInfo() { + if (skewedInfoBuilder_ == null) { + return skewedInfo_; + } else { + return skewedInfoBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder setSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + skewedInfo_ = value; + onChanged(); + } else { + skewedInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder setSkewedInfo( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder builderForValue) { + if (skewedInfoBuilder_ == null) { + skewedInfo_ = builderForValue.build(); + onChanged(); + } else { + skewedInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder mergeSkewedInfo(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo value) { + if (skewedInfoBuilder_ == null) { + if (((bitField0_ & 0x00000100) 
== 0x00000100) && + skewedInfo_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance()) { + skewedInfo_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(skewedInfo_).mergeFrom(value).buildPartial(); + } else { + skewedInfo_ = value; + } + onChanged(); + } else { + skewedInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public Builder clearSkewedInfo() { + if (skewedInfoBuilder_ == null) { + skewedInfo_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.getDefaultInstance(); + onChanged(); + } else { + skewedInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder getSkewedInfoBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getSkewedInfoFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder getSkewedInfoOrBuilder() { + if (skewedInfoBuilder_ != null) { + return skewedInfoBuilder_.getMessageOrBuilder(); + } else { + return skewedInfo_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.SkewedInfo skewed_info = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder> + getSkewedInfoFieldBuilder() { + if (skewedInfoBuilder_ == null) { + skewedInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor.SkewedInfoOrBuilder>( + skewedInfo_, + getParentForChildren(), + isClean()); + skewedInfo_ = null; + } + return skewedInfoBuilder_; + } + + // optional bool stored_as_sub_directories = 10; + private boolean storedAsSubDirectories_ ; + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean hasStoredAsSubDirectories() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public boolean getStoredAsSubDirectories() { + return storedAsSubDirectories_; + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public Builder setStoredAsSubDirectories(boolean value) { + bitField0_ |= 0x00000200; + storedAsSubDirectories_ = value; + onChanged(); + return this; + } + /** + * optional bool stored_as_sub_directories = 10; + */ + public Builder clearStoredAsSubDirectories() { + bitField0_ = (bitField0_ & ~0x00000200); + storedAsSubDirectories_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) + } + + static { 
+ defaultInstance = new StorageDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor) + } + + public interface TableOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string owner = 1; + /** + * optional string owner = 1; + */ + boolean hasOwner(); + /** + * optional string owner = 1; + */ + java.lang.String getOwner(); + /** + * optional string owner = 1; + */ + com.google.protobuf.ByteString + getOwnerBytes(); + + // optional int64 create_time = 2; + /** + * optional int64 create_time = 2; + */ + boolean hasCreateTime(); + /** + * optional int64 create_time = 2; + */ + long getCreateTime(); + + // optional int64 last_access_time = 3; + /** + * optional int64 last_access_time = 3; + */ + boolean hasLastAccessTime(); + /** + * optional int64 last_access_time = 3; + */ + long getLastAccessTime(); + + // optional int64 retention = 4; + /** + * optional int64 retention = 4; + */ + boolean hasRetention(); + /** + * optional int64 retention = 4; + */ + long getRetention(); + + // optional string location = 5; + /** + * optional string location = 5; + */ + boolean hasLocation(); + /** + * optional string location = 5; + */ + java.lang.String getLocation(); + /** + * optional string location = 5; + */ + com.google.protobuf.ByteString + getLocationBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * <pre>
+     * storage descriptor parameters
+     * </pre>
+ */ + boolean hasSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * <pre>
+     * storage descriptor parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * <pre>
+     * storage descriptor parameters
+     * </pre>
+ */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder(); + + // required bytes sd_hash = 7; + /** + * required bytes sd_hash = 7; + */ + boolean hasSdHash(); + /** + * required bytes sd_hash = 7; + */ + com.google.protobuf.ByteString getSdHash(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + java.util.List + getPartitionKeysList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + int getPartitionKeysCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + java.util.List + getPartitionKeysOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index); + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + boolean hasParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); + + // optional string view_original_text = 10; + /** + * optional string view_original_text = 10; + */ + boolean hasViewOriginalText(); + /** + * optional string view_original_text = 10; + */ + java.lang.String getViewOriginalText(); + /** + * optional string view_original_text = 10; + */ + com.google.protobuf.ByteString + getViewOriginalTextBytes(); + + // optional string view_expanded_text = 11; + /** + * optional string view_expanded_text = 11; + */ + boolean hasViewExpandedText(); + /** + * optional string view_expanded_text = 11; + */ + java.lang.String getViewExpandedText(); + /** + * optional string view_expanded_text = 11; + */ + com.google.protobuf.ByteString + getViewExpandedTextBytes(); + + // optional string table_type = 12; + /** + * optional string table_type = 12; + */ + boolean hasTableType(); + /** + * optional string table_type = 12; + */ + java.lang.String getTableType(); + /** + * optional string table_type = 12; + */ + com.google.protobuf.ByteString + getTableTypeBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + boolean hasPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder(); + + // optional bool is_temporary = 14; + /** + * optional bool is_temporary = 14; + */ + 
boolean hasIsTemporary(); + /** + * optional bool is_temporary = 14; + */ + boolean getIsTemporary(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} + */ + public static final class Table extends + com.google.protobuf.GeneratedMessage + implements TableOrBuilder { + // Use Table.newBuilder() to construct. + private Table(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Table defaultInstance; + public static Table getDefaultInstance() { + return defaultInstance; + } + + public Table getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Table( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + owner_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + createTime_ = input.readInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastAccessTime_ = input.readInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + retention_ = input.readInt64(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + location_ = input.readBytes(); + break; + } + case 50: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + subBuilder = sdParameters_.toBuilder(); + } + sdParameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sdParameters_); + sdParameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000020; + break; + } + case 58: { + bitField0_ |= 0x00000040; + sdHash_ = input.readBytes(); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + partitionKeys_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.PARSER, extensionRegistry)); + break; + } + case 74: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = parameters_.toBuilder(); + } + parameters_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(parameters_); + parameters_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } + case 82: { + bitField0_ |= 0x00000100; + viewOriginalText_ = input.readBytes(); + break; + } + case 90: { + bitField0_ |= 
0x00000200; + viewExpandedText_ = input.readBytes(); + break; + } + case 98: { + bitField0_ |= 0x00000400; + tableType_ = input.readBytes(); + break; + } + case 106: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; + if (((bitField0_ & 0x00000800) == 0x00000800)) { + subBuilder = privileges_.toBuilder(); + } + privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(privileges_); + privileges_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000800; + break; + } + case 112: { + bitField0_ |= 0x00001000; + isTemporary_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = java.util.Collections.unmodifiableList(partitionKeys_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); + } + + public static com.google.protobuf.Parser
<Table> PARSER = + new com.google.protobuf.AbstractParser<Table>
() { + public Table parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Table(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<Table>
getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string owner = 1; + public static final int OWNER_FIELD_NUMBER = 1; + private java.lang.Object owner_; + /** + * optional string owner = 1; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string owner = 1; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + owner_ = s; + } + return s; + } + } + /** + * optional string owner = 1; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 create_time = 2; + public static final int CREATE_TIME_FIELD_NUMBER = 2; + private long createTime_; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + + // optional int64 last_access_time = 3; + public static final int LAST_ACCESS_TIME_FIELD_NUMBER = 3; + private long lastAccessTime_; + /** + * optional int64 last_access_time = 3; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 last_access_time = 3; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + + // optional int64 retention = 4; + public static final int RETENTION_FIELD_NUMBER = 4; + private long retention_; + /** + * optional int64 retention = 4; + */ + public boolean hasRetention() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 retention = 4; + */ + public long getRetention() { + return retention_; + } + + // optional string location = 5; + public static final int LOCATION_FIELD_NUMBER = 5; + private java.lang.Object location_; + /** + * optional string location = 5; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string location = 5; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + location_ = s; + } + return s; + } + } + /** + * optional string location = 5; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + public static final int SD_PARAMETERS_FIELD_NUMBER = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_; + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + return sdParameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+     * storage descriptor parameters
+     * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + return sdParameters_; + } + + // required bytes sd_hash = 7; + public static final int SD_HASH_FIELD_NUMBER = 7; + private com.google.protobuf.ByteString sdHash_; + /** + * required bytes sd_hash = 7; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required bytes sd_hash = 7; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + public static final int PARTITION_KEYS_FIELD_NUMBER = 8; + private java.util.List partitionKeys_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List getPartitionKeysList() { + return partitionKeys_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysOrBuilderList() { + return partitionKeys_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public int getPartitionKeysCount() { + return partitionKeys_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { + return partitionKeys_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + return partitionKeys_.get(index); + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + public static final int PARAMETERS_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public boolean hasParameters() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + return parameters_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + return parameters_; + } + + // optional string view_original_text = 10; + public static final int VIEW_ORIGINAL_TEXT_FIELD_NUMBER = 10; + private java.lang.Object viewOriginalText_; + /** + * optional string view_original_text = 10; + */ + public boolean hasViewOriginalText() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional string view_original_text = 10; + */ + public java.lang.String getViewOriginalText() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + viewOriginalText_ = s; + } + return s; + } + } + /** + * optional string view_original_text = 10; + */ + public com.google.protobuf.ByteString + getViewOriginalTextBytes() { + java.lang.Object ref = viewOriginalText_; + if (ref 
instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewOriginalText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string view_expanded_text = 11; + public static final int VIEW_EXPANDED_TEXT_FIELD_NUMBER = 11; + private java.lang.Object viewExpandedText_; + /** + * optional string view_expanded_text = 11; + */ + public boolean hasViewExpandedText() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string view_expanded_text = 11; + */ + public java.lang.String getViewExpandedText() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + viewExpandedText_ = s; + } + return s; + } + } + /** + * optional string view_expanded_text = 11; + */ + public com.google.protobuf.ByteString + getViewExpandedTextBytes() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewExpandedText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string table_type = 12; + public static final int TABLE_TYPE_FIELD_NUMBER = 12; + private java.lang.Object tableType_; + /** + * optional string table_type = 12; + */ + public boolean hasTableType() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string table_type = 12; + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableType_ = s; + } + return s; + } + } + /** + * optional string table_type = 12; + */ + public com.google.protobuf.ByteString + getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + public static final int PRIVILEGES_FIELD_NUMBER = 13; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + return privileges_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + return privileges_; + } + + // optional bool is_temporary = 14; + public static final int IS_TEMPORARY_FIELD_NUMBER = 14; + private boolean isTemporary_; 
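[Editorial note, not part of the patch] The generated Table message above carries the table-level metadata enumerated in its accessors (owner, create_time, last_access_time, retention, location, sd_parameters, sd_hash, partition_keys, parameters, view_original_text, view_expanded_text, table_type, privileges, is_temporary), with sd_hash as its only required field. A minimal, hypothetical round trip through this generated API is sketched below; the class name TableProtoRoundTrip and all field values are illustrative, and only setters and parse methods visible in this generated class are used.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

// Hypothetical illustration of the generated builder/parser API above.
public class TableProtoRoundTrip {
  public static void main(String[] args) throws Exception {
    HbaseMetastoreProto.Table table = HbaseMetastoreProto.Table.newBuilder()
        .setOwner("hive")                                     // optional string owner = 1
        .setCreateTime(System.currentTimeMillis() / 1000L)    // optional int64 create_time = 2
        .setLocation("hdfs://nn/warehouse/t1")                // optional string location = 5 (illustrative)
        .setSdHash(ByteString.copyFromUtf8("sd-hash-bytes"))  // required bytes sd_hash = 7 (illustrative)
        .build();

    // Serialize and re-parse using the generated parseFrom overloads.
    byte[] bytes = table.toByteArray();
    HbaseMetastoreProto.Table copy = HbaseMetastoreProto.Table.parseFrom(bytes);
    System.out.println(copy.getOwner() + " hasSdHash=" + copy.hasSdHash());
  }
}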
+ /** + * optional bool is_temporary = 14; + */ + public boolean hasIsTemporary() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional bool is_temporary = 14; + */ + public boolean getIsTemporary() { + return isTemporary_; + } + + private void initFields() { + owner_ = ""; + createTime_ = 0L; + lastAccessTime_ = 0L; + retention_ = 0L; + location_ = ""; + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + partitionKeys_ = java.util.Collections.emptyList(); + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + viewOriginalText_ = ""; + viewExpandedText_ = ""; + tableType_ = ""; + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + isTemporary_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSdHash()) { + memoizedIsInitialized = 0; + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getPartitionKeysCount(); i++) { + if (!getPartitionKeys(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getOwnerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeInt64(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, lastAccessTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(4, retention_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getLocationBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeMessage(6, sdParameters_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBytes(7, sdHash_); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + output.writeMessage(8, partitionKeys_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(9, parameters_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeBytes(10, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(12, getTableTypeBytes()); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeMessage(13, privileges_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + output.writeBool(14, isTemporary_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, 
getOwnerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(2, createTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, lastAccessTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, retention_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getLocationBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, sdParameters_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(7, sdHash_); + } + for (int i = 0; i < partitionKeys_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, partitionKeys_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, parameters_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(10, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(12, getTableTypeBytes()); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(13, privileges_); + } + if (((bitField0_ & 0x00001000) == 0x00001000)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(14, isTemporary_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.Table} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.TableOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSdParametersFieldBuilder(); + getPartitionKeysFieldBuilder(); + getParametersFieldBuilder(); + getPrivilegesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + owner_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + createTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastAccessTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + retention_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + location_ 
= ""; + bitField0_ = (bitField0_ & ~0x00000010); + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + sdHash_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + partitionKeysBuilder_.clear(); + } + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + viewOriginalText_ = ""; + bitField0_ = (bitField0_ & ~0x00000200); + viewExpandedText_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + tableType_ = ""; + bitField0_ = (bitField0_ & ~0x00000800); + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00001000); + isTemporary_ = false; + bitField0_ = (bitField0_ & ~0x00002000); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.owner_ = owner_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.createTime_ = createTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastAccessTime_ = lastAccessTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.retention_ = retention_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.location_ = location_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + if (sdParametersBuilder_ == null) { + result.sdParameters_ = sdParameters_; + } else { + result.sdParameters_ = sdParametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.sdHash_ = sdHash_; + if (partitionKeysBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = 
java.util.Collections.unmodifiableList(partitionKeys_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.partitionKeys_ = partitionKeys_; + } else { + result.partitionKeys_ = partitionKeysBuilder_.build(); + } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + if (parametersBuilder_ == null) { + result.parameters_ = parameters_; + } else { + result.parameters_ = parametersBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.viewOriginalText_ = viewOriginalText_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.viewExpandedText_ = viewExpandedText_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.tableType_ = tableType_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; + } + if (privilegesBuilder_ == null) { + result.privileges_ = privileges_; + } else { + result.privileges_ = privilegesBuilder_.build(); + } + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { + to_bitField0_ |= 0x00001000; + } + result.isTemporary_ = isTemporary_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table.getDefaultInstance()) return this; + if (other.hasOwner()) { + bitField0_ |= 0x00000001; + owner_ = other.owner_; + onChanged(); + } + if (other.hasCreateTime()) { + setCreateTime(other.getCreateTime()); + } + if (other.hasLastAccessTime()) { + setLastAccessTime(other.getLastAccessTime()); + } + if (other.hasRetention()) { + setRetention(other.getRetention()); + } + if (other.hasLocation()) { + bitField0_ |= 0x00000010; + location_ = other.location_; + onChanged(); + } + if (other.hasSdParameters()) { + mergeSdParameters(other.getSdParameters()); + } + if (other.hasSdHash()) { + setSdHash(other.getSdHash()); + } + if (partitionKeysBuilder_ == null) { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeys_.isEmpty()) { + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensurePartitionKeysIsMutable(); + partitionKeys_.addAll(other.partitionKeys_); + } + onChanged(); + } + } else { + if (!other.partitionKeys_.isEmpty()) { + if (partitionKeysBuilder_.isEmpty()) { + partitionKeysBuilder_.dispose(); + partitionKeysBuilder_ = null; + partitionKeys_ = other.partitionKeys_; + bitField0_ = (bitField0_ & ~0x00000080); + partitionKeysBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getPartitionKeysFieldBuilder() : null; + } else { + partitionKeysBuilder_.addAllMessages(other.partitionKeys_); + } + } + } + if (other.hasParameters()) { + mergeParameters(other.getParameters()); + } + if (other.hasViewOriginalText()) { + bitField0_ |= 0x00000200; + viewOriginalText_ = other.viewOriginalText_; + onChanged(); + } + if (other.hasViewExpandedText()) { + bitField0_ |= 0x00000400; + viewExpandedText_ = other.viewExpandedText_; + onChanged(); + } + if (other.hasTableType()) { + bitField0_ |= 0x00000800; + tableType_ = other.tableType_; + onChanged(); + } + if (other.hasPrivileges()) { + mergePrivileges(other.getPrivileges()); + } + if (other.hasIsTemporary()) { + setIsTemporary(other.getIsTemporary()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSdHash()) { + + return false; + } + if (hasSdParameters()) { + if (!getSdParameters().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getPartitionKeysCount(); i++) { + if (!getPartitionKeys(i).isInitialized()) { + + return false; + } + } + if (hasParameters()) { + if (!getParameters().isInitialized()) { + + return false; + } + } + if (hasPrivileges()) { + if (!getPrivileges().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Table) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string owner = 1; + private java.lang.Object owner_ = ""; + /** + * optional string owner = 1; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string owner = 1; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + owner_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner = 1; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner = 1; + */ + public Builder setOwner( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + owner_ = value; + onChanged(); + return this; + } + /** + * optional string owner = 1; + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00000001); + owner_ = getDefaultInstance().getOwner(); + onChanged(); + return this; + } + /** + * optional string owner = 1; + */ + public Builder setOwnerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + owner_ = value; + onChanged(); + return this; + } + + 
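[Editorial note, not part of the patch] Because sd_hash is declared required, Builder.isInitialized() reports false until it is set, build() throws via newUninitializedMessageException() on an incomplete builder, and buildPartial() still returns a message. A hedged sketch of that behavior follows; the class name TableRequiredFieldCheck and the "sd-hash-bytes" value are illustrative.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

// Hypothetical illustration of required-field handling in the builder above.
public class TableRequiredFieldCheck {
  public static void main(String[] args) {
    HbaseMetastoreProto.Table.Builder builder =
        HbaseMetastoreProto.Table.newBuilder().setOwner("hive");

    System.out.println(builder.isInitialized());                 // false: sd_hash not set yet
    HbaseMetastoreProto.Table partial = builder.buildPartial();  // tolerated even when incomplete

    builder.setSdHash(ByteString.copyFromUtf8("sd-hash-bytes")); // satisfy the required field
    HbaseMetastoreProto.Table complete = builder.build();        // now succeeds
    System.out.println(complete.hasSdHash());                    // true
  }
}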
// optional int64 create_time = 2; + private long createTime_ ; + /** + * optional int64 create_time = 2; + */ + public boolean hasCreateTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional int64 create_time = 2; + */ + public long getCreateTime() { + return createTime_; + } + /** + * optional int64 create_time = 2; + */ + public Builder setCreateTime(long value) { + bitField0_ |= 0x00000002; + createTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 create_time = 2; + */ + public Builder clearCreateTime() { + bitField0_ = (bitField0_ & ~0x00000002); + createTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 last_access_time = 3; + private long lastAccessTime_ ; + /** + * optional int64 last_access_time = 3; + */ + public boolean hasLastAccessTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 last_access_time = 3; + */ + public long getLastAccessTime() { + return lastAccessTime_; + } + /** + * optional int64 last_access_time = 3; + */ + public Builder setLastAccessTime(long value) { + bitField0_ |= 0x00000004; + lastAccessTime_ = value; + onChanged(); + return this; + } + /** + * optional int64 last_access_time = 3; + */ + public Builder clearLastAccessTime() { + bitField0_ = (bitField0_ & ~0x00000004); + lastAccessTime_ = 0L; + onChanged(); + return this; + } + + // optional int64 retention = 4; + private long retention_ ; + /** + * optional int64 retention = 4; + */ + public boolean hasRetention() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 retention = 4; + */ + public long getRetention() { + return retention_; + } + /** + * optional int64 retention = 4; + */ + public Builder setRetention(long value) { + bitField0_ |= 0x00000008; + retention_ = value; + onChanged(); + return this; + } + /** + * optional int64 retention = 4; + */ + public Builder clearRetention() { + bitField0_ = (bitField0_ & ~0x00000008); + retention_ = 0L; + onChanged(); + return this; + } + + // optional string location = 5; + private java.lang.Object location_ = ""; + /** + * optional string location = 5; + */ + public boolean hasLocation() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string location = 5; + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string location = 5; + */ + public com.google.protobuf.ByteString + getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string location = 5; + */ + public Builder setLocation( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + location_ = value; + onChanged(); + return this; + } + /** + * optional string location = 5; + */ + public Builder clearLocation() { + bitField0_ = (bitField0_ & ~0x00000010); + location_ = getDefaultInstance().getLocation(); + onChanged(); + return this; + } + /** + * optional string location = 5; + */ + public Builder setLocationBytes( + com.google.protobuf.ByteString value) { + if 
(value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + location_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> sdParametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public boolean hasSdParameters() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getSdParameters() { + if (sdParametersBuilder_ == null) { + return sdParameters_; + } else { + return sdParametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sdParameters_ = value; + onChanged(); + } else { + sdParametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder setSdParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (sdParametersBuilder_ == null) { + sdParameters_ = builderForValue.build(); + onChanged(); + } else { + sdParametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder mergeSdParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (sdParametersBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + sdParameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + sdParameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(sdParameters_).mergeFrom(value).buildPartial(); + } else { + sdParameters_ = value; + } + onChanged(); + } else { + sdParametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public Builder clearSdParameters() { + if (sdParametersBuilder_ == null) { + sdParameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + sdParametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getSdParametersBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getSdParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getSdParametersOrBuilder() { + if (sdParametersBuilder_ != null) { + return sdParametersBuilder_.getMessageOrBuilder(); + } else { + return sdParameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters sd_parameters = 6; + * + *
+       * storage descriptor parameters
+       * 
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getSdParametersFieldBuilder() { + if (sdParametersBuilder_ == null) { + sdParametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + sdParameters_, + getParentForChildren(), + isClean()); + sdParameters_ = null; + } + return sdParametersBuilder_; + } + + // required bytes sd_hash = 7; + private com.google.protobuf.ByteString sdHash_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes sd_hash = 7; + */ + public boolean hasSdHash() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required bytes sd_hash = 7; + */ + public com.google.protobuf.ByteString getSdHash() { + return sdHash_; + } + /** + * required bytes sd_hash = 7; + */ + public Builder setSdHash(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + sdHash_ = value; + onChanged(); + return this; + } + /** + * required bytes sd_hash = 7; + */ + public Builder clearSdHash() { + bitField0_ = (bitField0_ & ~0x00000040); + sdHash_ = getDefaultInstance().getSdHash(); + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + private java.util.List partitionKeys_ = + java.util.Collections.emptyList(); + private void ensurePartitionKeysIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + partitionKeys_ = new java.util.ArrayList(partitionKeys_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> partitionKeysBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List getPartitionKeysList() { + if (partitionKeysBuilder_ == null) { + return java.util.Collections.unmodifiableList(partitionKeys_); + } else { + return partitionKeysBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public int getPartitionKeysCount() { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.size(); + } else { + return partitionKeysBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema getPartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); + } else { + return partitionKeysBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder setPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, value); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder setPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.set(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema value) { + if (partitionKeysBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, value); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addPartitionKeys( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder builderForValue) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.add(index, builderForValue.build()); + onChanged(); + } else { + partitionKeysBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder addAllPartitionKeys( + java.lang.Iterable values) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + super.addAll(values, partitionKeys_); + onChanged(); + } else { + partitionKeysBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder clearPartitionKeys() { + if (partitionKeysBuilder_ == null) { + partitionKeys_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + partitionKeysBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public Builder removePartitionKeys(int index) { + if (partitionKeysBuilder_ == null) { + ensurePartitionKeysIsMutable(); + partitionKeys_.remove(index); + 
onChanged(); + } else { + partitionKeysBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder getPartitionKeysBuilder( + int index) { + return getPartitionKeysFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder getPartitionKeysOrBuilder( + int index) { + if (partitionKeysBuilder_ == null) { + return partitionKeys_.get(index); } else { + return partitionKeysBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysOrBuilderList() { + if (partitionKeysBuilder_ != null) { + return partitionKeysBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(partitionKeys_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder() { + return getPartitionKeysFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder addPartitionKeysBuilder( + int index) { + return getPartitionKeysFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.FieldSchema partition_keys = 8; + */ + public java.util.List + getPartitionKeysBuilderList() { + return getPartitionKeysFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder> + getPartitionKeysFieldBuilder() { + if (partitionKeysBuilder_ == null) { + partitionKeysBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchema.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.FieldSchemaOrBuilder>( + partitionKeys_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + partitionKeys_ = null; + } + return partitionKeysBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> parametersBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + 
public boolean hasParameters() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters getParameters() { + if (parametersBuilder_ == null) { + return parameters_; + } else { + return parametersBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder setParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + parameters_ = value; + onChanged(); + } else { + parametersBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder setParameters( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder builderForValue) { + if (parametersBuilder_ == null) { + parameters_ = builderForValue.build(); + onChanged(); + } else { + parametersBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder mergeParameters(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters value) { + if (parametersBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100) && + parameters_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance()) { + parameters_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.newBuilder(parameters_).mergeFrom(value).buildPartial(); + } else { + parameters_ = value; + } + onChanged(); + } else { + parametersBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public Builder clearParameters() { + if (parametersBuilder_ == null) { + parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); + onChanged(); + } else { + parametersBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder getParametersBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getParametersFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder() { + if (parametersBuilder_ != null) { + return parametersBuilder_.getMessageOrBuilder(); + } else { + return parameters_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.Parameters parameters = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder> + getParametersFieldBuilder() { + if (parametersBuilder_ == null) { + parametersBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder>( + parameters_, + getParentForChildren(), + isClean()); + parameters_ = null; + } + return parametersBuilder_; + } + + // optional string view_original_text = 10; + private java.lang.Object viewOriginalText_ = ""; + /** + * optional string view_original_text = 10; + */ + public boolean hasViewOriginalText() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string view_original_text = 10; + */ + public java.lang.String getViewOriginalText() { + java.lang.Object ref = viewOriginalText_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + viewOriginalText_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string view_original_text = 10; + */ + public com.google.protobuf.ByteString + getViewOriginalTextBytes() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewOriginalText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string view_original_text = 10; + */ + public Builder setViewOriginalText( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + viewOriginalText_ = value; + onChanged(); + return this; + } + /** + * optional string view_original_text = 10; + */ + public Builder clearViewOriginalText() { + bitField0_ = (bitField0_ & ~0x00000200); + viewOriginalText_ = getDefaultInstance().getViewOriginalText(); + onChanged(); + return this; + } + /** + * optional string view_original_text = 10; + */ + public Builder setViewOriginalTextBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + viewOriginalText_ = value; + onChanged(); + return this; + } + + // optional string view_expanded_text = 11; + private java.lang.Object viewExpandedText_ = ""; + /** + * optional string view_expanded_text = 11; + */ + public boolean hasViewExpandedText() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string view_expanded_text = 11; + */ + public java.lang.String getViewExpandedText() { + java.lang.Object ref = viewExpandedText_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + viewExpandedText_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string view_expanded_text = 11; + */ + public com.google.protobuf.ByteString + getViewExpandedTextBytes() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewExpandedText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string view_expanded_text = 11; + */ + public Builder setViewExpandedText( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + viewExpandedText_ = value; + onChanged(); + return this; + } + /** + * optional string view_expanded_text = 11; + */ 
+ public Builder clearViewExpandedText() { + bitField0_ = (bitField0_ & ~0x00000400); + viewExpandedText_ = getDefaultInstance().getViewExpandedText(); + onChanged(); + return this; + } + /** + * optional string view_expanded_text = 11; + */ + public Builder setViewExpandedTextBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + viewExpandedText_ = value; + onChanged(); + return this; + } + + // optional string table_type = 12; + private java.lang.Object tableType_ = ""; + /** + * optional string table_type = 12; + */ + public boolean hasTableType() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional string table_type = 12; + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string table_type = 12; + */ + public com.google.protobuf.ByteString + getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table_type = 12; + */ + public Builder setTableType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + tableType_ = value; + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder clearTableType() { + bitField0_ = (bitField0_ & ~0x00000800); + tableType_ = getDefaultInstance().getTableType(); + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder setTableTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + tableType_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if (privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + 
if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00001000) == 0x00001000) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00001000; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00001000); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00001000; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // optional bool is_temporary = 14; + private boolean isTemporary_ ; + /** + * 
optional bool is_temporary = 14; + */ + public boolean hasIsTemporary() { + return ((bitField0_ & 0x00002000) == 0x00002000); + } + /** + * optional bool is_temporary = 14; + */ + public boolean getIsTemporary() { + return isTemporary_; + } + /** + * optional bool is_temporary = 14; + */ + public Builder setIsTemporary(boolean value) { + bitField0_ |= 0x00002000; + isTemporary_ = value; + onChanged(); + return this; + } + /** + * optional bool is_temporary = 14; + */ + public Builder clearIsTemporary() { + bitField0_ = (bitField0_ & ~0x00002000); + isTemporary_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + static { + defaultInstance = new Table(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + public interface PartitionKeyComparatorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string names = 1; + /** + * required string names = 1; + */ + boolean hasNames(); + /** + * required string names = 1; + */ + java.lang.String getNames(); + /** + * required string names = 1; + */ + com.google.protobuf.ByteString + getNamesBytes(); + + // required string types = 2; + /** + * required string types = 2; + */ + boolean hasTypes(); + /** + * required string types = 2; + */ + java.lang.String getTypes(); + /** + * required string types = 2; + */ + com.google.protobuf.ByteString + getTypesBytes(); + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + java.util.List + getOpList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + int getOpCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + java.util.List + getOpOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( + int index); + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + java.util.List + getRangeList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + int getRangeCount(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + java.util.List + getRangeOrBuilderList(); + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( + int index); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator} + */ + public static 
final class PartitionKeyComparator extends + com.google.protobuf.GeneratedMessage + implements PartitionKeyComparatorOrBuilder { + // Use PartitionKeyComparator.newBuilder() to construct. + private PartitionKeyComparator(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private PartitionKeyComparator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PartitionKeyComparator defaultInstance; + public static PartitionKeyComparator getDefaultInstance() { + return defaultInstance; + } + + public PartitionKeyComparator getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PartitionKeyComparator( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + names_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + types_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + op_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + op_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + range_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + range_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + op_ = java.util.Collections.unmodifiableList(op_); + } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + range_ = java.util.Collections.unmodifiableList(range_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PartitionKeyComparator parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PartitionKeyComparator(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface MarkOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string value = 1; + /** + * required string value = 1; + */ + boolean hasValue(); + /** + * required string value = 1; + */ + java.lang.String getValue(); + /** + * required string value = 1; + */ + com.google.protobuf.ByteString + getValueBytes(); + + // required bool inclusive = 2; + /** + * required bool inclusive = 2; + */ + boolean hasInclusive(); + /** + * required bool inclusive = 2; + */ + boolean getInclusive(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark} + */ + public static final class Mark extends + com.google.protobuf.GeneratedMessage + implements MarkOrBuilder { + // Use Mark.newBuilder() to construct. + private Mark(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Mark(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Mark defaultInstance; + public static Mark getDefaultInstance() { + return defaultInstance; + } + + public Mark getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Mark( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + value_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + inclusive_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Mark parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Mark(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string value = 1; + public static final int VALUE_FIELD_NUMBER = 1; + private java.lang.Object value_; + /** + * required string value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string value = 1; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * required string value = 1; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required bool inclusive = 2; + public static final int INCLUSIVE_FIELD_NUMBER = 2; + private boolean inclusive_; + /** + * required bool inclusive = 2; + */ + public boolean hasInclusive() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool inclusive = 2; + */ + public boolean getInclusive() { + return inclusive_; + } + + private void initFields() { + value_ = ""; + inclusive_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasInclusive()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getValueBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, inclusive_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getValueBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeBoolSize(2, inclusive_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + inclusive_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.value_ = value_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.inclusive_ = inclusive_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message 
other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) return this; + if (other.hasValue()) { + bitField0_ |= 0x00000001; + value_ = other.value_; + onChanged(); + } + if (other.hasInclusive()) { + setInclusive(other.getInclusive()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasValue()) { + + return false; + } + if (!hasInclusive()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string value = 1; + private java.lang.Object value_ = ""; + /** + * required string value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string value = 1; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string value = 1; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string value = 1; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + /** + * required string value = 1; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * required string value = 1; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + + // required bool inclusive = 2; + private boolean inclusive_ ; + /** + * required bool inclusive = 2; + */ + public boolean hasInclusive() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool inclusive = 2; + */ + public boolean getInclusive() { + return inclusive_; + } + /** + * required 
bool inclusive = 2; + */ + public Builder setInclusive(boolean value) { + bitField0_ |= 0x00000002; + inclusive_ = value; + onChanged(); + return this; + } + /** + * required bool inclusive = 2; + */ + public Builder clearInclusive() { + bitField0_ = (bitField0_ & ~0x00000002); + inclusive_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark) + } + + static { + defaultInstance = new Mark(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark) + } + + public interface RangeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string key = 1; + /** + * required string key = 1; + */ + boolean hasKey(); + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + boolean hasStart(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder(); + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + boolean hasEnd(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd(); + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range} + */ + public static final class Range extends + com.google.protobuf.GeneratedMessage + implements RangeOrBuilder { + // Use Range.newBuilder() to construct. 
+ private Range(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Range(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Range defaultInstance; + public static Range getDefaultInstance() { + return defaultInstance; + } + + public Range getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Range( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = start_.toBuilder(); + } + start_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(start_); + start_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = end_.toBuilder(); + } + end_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(end_); + end_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + public Range parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Range(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + public static final int START_FIELD_NUMBER = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public boolean hasStart() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() { + return start_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() { + return start_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + public static final int END_FIELD_NUMBER = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public boolean hasEnd() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() { + return end_; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() { + return end_; + } + + private void initFields() { + key_ = ""; + start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + } + private byte 
memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (hasStart()) { + if (!getStart().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasEnd()) { + if (!getEnd().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, start_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, end_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, start_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, end_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStartFieldBuilder(); + getEndFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (startBuilder_ == null) { + start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + } else { + startBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (endBuilder_ == null) { 
+ end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + } else { + endBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (startBuilder_ == null) { + result.start_ = start_; + } else { + result.start_ = startBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (endBuilder_ == null) { + result.end_ = end_; + } else { + result.end_ = endBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasStart()) { + mergeStart(other.getStart()); + } + if (other.hasEnd()) { + mergeEnd(other.getEnd()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + if (hasStart()) { + if (!getStart().isInitialized()) { + + return false; + } + } + if (hasEnd()) { + if (!getEnd().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range parsedMessage = null; + try { + 
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string key = 1; + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> startBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public boolean hasStart() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getStart() { + if (startBuilder_ == null) { + return start_; + } else { + return startBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public Builder setStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { + if (startBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + start_ = value; + onChanged(); + } else { + startBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } 
+ /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public Builder setStart( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) { + if (startBuilder_ == null) { + start_ = builderForValue.build(); + onChanged(); + } else { + startBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public Builder mergeStart(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { + if (startBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + start_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) { + start_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(start_).mergeFrom(value).buildPartial(); + } else { + start_ = value; + } + onChanged(); + } else { + startBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public Builder clearStart() { + if (startBuilder_ == null) { + start_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + onChanged(); + } else { + startBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getStartBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getStartFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getStartOrBuilder() { + if (startBuilder_ != null) { + return startBuilder_.getMessageOrBuilder(); + } else { + return start_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark start = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> + getStartFieldBuilder() { + if (startBuilder_ == null) { + startBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>( + start_, + getParentForChildren(), + isClean()); + start_ = null; + } + return startBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> endBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public boolean hasEnd() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark getEnd() { + if (endBuilder_ == null) { + return end_; + } else { + return endBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public Builder setEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { + if (endBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + end_ = value; + onChanged(); + } else { + endBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public Builder setEnd( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder builderForValue) { + if (endBuilder_ == null) { + end_ = builderForValue.build(); + onChanged(); + } else { + endBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public Builder mergeEnd(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark value) { + if (endBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + end_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance()) { + end_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.newBuilder(end_).mergeFrom(value).buildPartial(); + } else { + end_ = value; + } + onChanged(); + } else { + endBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public Builder clearEnd() { + if (endBuilder_ == null) { + end_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.getDefaultInstance(); + onChanged(); + } else { + endBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder getEndBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getEndFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder getEndOrBuilder() { + if (endBuilder_ != null) { + return endBuilder_.getMessageOrBuilder(); + } else { + return end_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Mark end = 3; + */ + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder> + getEndFieldBuilder() { + if (endBuilder_ == null) { + endBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Mark.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.MarkOrBuilder>( + end_, + getParentForChildren(), + isClean()); + end_ = null; + } + return endBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range) + } + + static { + defaultInstance = new Range(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range) + } + + public interface OperatorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + boolean hasType(); + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType(); + + // required string key = 2; + /** + * required string key = 2; + */ + boolean hasKey(); + /** + * required string key = 2; + */ + java.lang.String getKey(); + /** + * required string key = 2; + */ + com.google.protobuf.ByteString + getKeyBytes(); + + // required string val = 3; + /** + * required string val = 3; + */ + boolean hasVal(); + /** + * required string val = 3; + */ + java.lang.String getVal(); + /** + * required string val = 3; + */ + com.google.protobuf.ByteString + getValBytes(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator} + */ + public static final class Operator extends + com.google.protobuf.GeneratedMessage + implements OperatorOrBuilder { + // Use Operator.newBuilder() to construct. 
+ private Operator(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Operator(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Operator defaultInstance; + public static Operator getDefaultInstance() { + return defaultInstance; + } + + public Operator getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Operator( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + bitField0_ |= 0x00000002; + key_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + val_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Operator parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Operator(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code 
org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type} + */ + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + /** + * LIKE = 0; + */ + LIKE(0, 0), + /** + * NOTEQUALS = 1; + */ + NOTEQUALS(1, 1), + ; + + /** + * LIKE = 0; + */ + public static final int LIKE_VALUE = 0; + /** + * NOTEQUALS = 1; + */ + public static final int NOTEQUALS_VALUE = 1; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 0: return LIKE; + case 1: return NOTEQUALS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type) + } + + private int bitField0_; + // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() { + return type_; + } + + // required string key = 2; + public static final int KEY_FIELD_NUMBER = 2; + private java.lang.Object key_; + /** + * required string key = 2; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string key = 2; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 2; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string val = 3; + public static final int VAL_FIELD_NUMBER = 3; + private java.lang.Object val_; + /** + * required string val = 3; + */ + public boolean hasVal() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string val = 3; + */ + public java.lang.String getVal() { + java.lang.Object ref = val_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + val_ = s; + } + return s; + } + } + /** + * required string val = 3; + */ + public com.google.protobuf.ByteString + getValBytes() { + java.lang.Object ref = val_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + val_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; + key_ = ""; + val_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasVal()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getKeyBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getValBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getKeyBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getValBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; + bitField0_ = (bitField0_ & ~0x00000001); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + val_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.val_ = val_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator other) { + if (other == 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasKey()) { + bitField0_ |= 0x00000002; + key_ = other.key_; + onChanged(); + } + if (other.hasVal()) { + bitField0_ |= 0x00000004; + val_ = other.val_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasKey()) { + + return false; + } + if (!hasVal()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type getType() { + return type_; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public Builder setType(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator.Type type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type.LIKE; + onChanged(); + return this; + } + + // required string key = 2; + private java.lang.Object key_ = ""; + /** + * required string key = 2; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string key = 2; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 2; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + 
(java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 2; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 2; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000002); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 2; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + key_ = value; + onChanged(); + return this; + } + + // required string val = 3; + private java.lang.Object val_ = ""; + /** + * required string val = 3; + */ + public boolean hasVal() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string val = 3; + */ + public java.lang.String getVal() { + java.lang.Object ref = val_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + val_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string val = 3; + */ + public com.google.protobuf.ByteString + getValBytes() { + java.lang.Object ref = val_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + val_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string val = 3; + */ + public Builder setVal( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + val_ = value; + onChanged(); + return this; + } + /** + * required string val = 3; + */ + public Builder clearVal() { + bitField0_ = (bitField0_ & ~0x00000004); + val_ = getDefaultInstance().getVal(); + onChanged(); + return this; + } + /** + * required string val = 3; + */ + public Builder setValBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + val_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator) + } + + static { + defaultInstance = new Operator(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator) + } + + private int bitField0_; + // required string names = 1; + public static final int NAMES_FIELD_NUMBER = 1; + private java.lang.Object names_; + /** + * required string names = 1; + */ + public boolean hasNames() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string names = 1; + */ + public java.lang.String getNames() { + java.lang.Object ref = names_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + names_ = s; + } + return s; + } + } + /** + * required string names = 1; + */ + public com.google.protobuf.ByteString + getNamesBytes() { + java.lang.Object ref = names_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + 
com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + names_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string types = 2; + public static final int TYPES_FIELD_NUMBER = 2; + private java.lang.Object types_; + /** + * required string types = 2; + */ + public boolean hasTypes() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string types = 2; + */ + public java.lang.String getTypes() { + java.lang.Object ref = types_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + types_ = s; + } + return s; + } + } + /** + * required string types = 2; + */ + public com.google.protobuf.ByteString + getTypesBytes() { + java.lang.Object ref = types_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + types_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + public static final int OP_FIELD_NUMBER = 3; + private java.util.List op_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public java.util.List getOpList() { + return op_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public java.util.List + getOpOrBuilderList() { + return op_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public int getOpCount() { + return op_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) { + return op_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( + int index) { + return op_.get(index); + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + public static final int RANGE_FIELD_NUMBER = 4; + private java.util.List range_; + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public java.util.List getRangeList() { + return range_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public java.util.List + getRangeOrBuilderList() { + return range_; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public int getRangeCount() { + return range_.size(); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) { + return range_.get(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( + int index) { + return 
range_.get(index); + } + + private void initFields() { + names_ = ""; + types_ = ""; + op_ = java.util.Collections.emptyList(); + range_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNames()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTypes()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getOpCount(); i++) { + if (!getOp(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRangeCount(); i++) { + if (!getRange(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNamesBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTypesBytes()); + } + for (int i = 0; i < op_.size(); i++) { + output.writeMessage(3, op_.get(i)); + } + for (int i = 0; i < range_.size(); i++) { + output.writeMessage(4, range_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNamesBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTypesBytes()); + } + for (int i = 0; i < op_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, op_.get(i)); + } + for (int i = 0; i < range_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, range_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparatorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getOpFieldBuilder(); + getRangeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + names_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + types_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + if (opBuilder_ == null) { + op_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + opBuilder_.clear(); + } + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + rangeBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.names_ = names_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.types_ = types_; + if (opBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + op_ = java.util.Collections.unmodifiableList(op_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.op_ = op_; + } else { + result.op_ = opBuilder_.build(); + } + if (rangeBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + range_ = java.util.Collections.unmodifiableList(range_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.range_ = range_; + } else { + result.range_ = rangeBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.getDefaultInstance()) return this; + if (other.hasNames()) { + bitField0_ |= 0x00000001; + names_ = other.names_; + onChanged(); + } + if (other.hasTypes()) { + bitField0_ |= 0x00000002; + types_ = other.types_; + onChanged(); + } + if (opBuilder_ == 
null) { + if (!other.op_.isEmpty()) { + if (op_.isEmpty()) { + op_ = other.op_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureOpIsMutable(); + op_.addAll(other.op_); + } + onChanged(); + } + } else { + if (!other.op_.isEmpty()) { + if (opBuilder_.isEmpty()) { + opBuilder_.dispose(); + opBuilder_ = null; + op_ = other.op_; + bitField0_ = (bitField0_ & ~0x00000004); + opBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getOpFieldBuilder() : null; + } else { + opBuilder_.addAllMessages(other.op_); + } + } + } + if (rangeBuilder_ == null) { + if (!other.range_.isEmpty()) { + if (range_.isEmpty()) { + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureRangeIsMutable(); + range_.addAll(other.range_); + } + onChanged(); + } + } else { + if (!other.range_.isEmpty()) { + if (rangeBuilder_.isEmpty()) { + rangeBuilder_.dispose(); + rangeBuilder_ = null; + range_ = other.range_; + bitField0_ = (bitField0_ & ~0x00000008); + rangeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRangeFieldBuilder() : null; + } else { + rangeBuilder_.addAllMessages(other.range_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNames()) { + + return false; + } + if (!hasTypes()) { + + return false; + } + for (int i = 0; i < getOpCount(); i++) { + if (!getOp(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRangeCount(); i++) { + if (!getRange(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string names = 1; + private java.lang.Object names_ = ""; + /** + * required string names = 1; + */ + public boolean hasNames() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string names = 1; + */ + public java.lang.String getNames() { + java.lang.Object ref = names_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + names_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string names = 1; + */ + public com.google.protobuf.ByteString + getNamesBytes() { + java.lang.Object ref = names_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + names_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string names = 1; + */ + public Builder setNames( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + names_ = value; + onChanged(); + return this; + } + /** + * required string names = 1; + */ + public Builder clearNames() { + bitField0_ = (bitField0_ & ~0x00000001); + 
names_ = getDefaultInstance().getNames(); + onChanged(); + return this; + } + /** + * required string names = 1; + */ + public Builder setNamesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + names_ = value; + onChanged(); + return this; + } + + // required string types = 2; + private java.lang.Object types_ = ""; + /** + * required string types = 2; + */ + public boolean hasTypes() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string types = 2; + */ + public java.lang.String getTypes() { + java.lang.Object ref = types_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + types_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string types = 2; + */ + public com.google.protobuf.ByteString + getTypesBytes() { + java.lang.Object ref = types_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + types_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string types = 2; + */ + public Builder setTypes( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + types_ = value; + onChanged(); + return this; + } + /** + * required string types = 2; + */ + public Builder clearTypes() { + bitField0_ = (bitField0_ & ~0x00000002); + types_ = getDefaultInstance().getTypes(); + onChanged(); + return this; + } + /** + * required string types = 2; + */ + public Builder setTypesBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + types_ = value; + onChanged(); + return this; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + private java.util.List op_ = + java.util.Collections.emptyList(); + private void ensureOpIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + op_ = new java.util.ArrayList(op_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder> opBuilder_; + + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public java.util.List getOpList() { + if (opBuilder_ == null) { + return java.util.Collections.unmodifiableList(op_); + } else { + return opBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public int getOpCount() { + if (opBuilder_ == null) { + return op_.size(); + } else { + return opBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator getOp(int index) { + if (opBuilder_ == null) { + return op_.get(index); + } else { + return opBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + 
*/ + public Builder setOp( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { + if (opBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpIsMutable(); + op_.set(index, value); + onChanged(); + } else { + opBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder setOp( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { + if (opBuilder_ == null) { + ensureOpIsMutable(); + op_.set(index, builderForValue.build()); + onChanged(); + } else { + opBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder addOp(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { + if (opBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpIsMutable(); + op_.add(value); + onChanged(); + } else { + opBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder addOp( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator value) { + if (opBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpIsMutable(); + op_.add(index, value); + onChanged(); + } else { + opBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder addOp( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { + if (opBuilder_ == null) { + ensureOpIsMutable(); + op_.add(builderForValue.build()); + onChanged(); + } else { + opBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder addOp( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder builderForValue) { + if (opBuilder_ == null) { + ensureOpIsMutable(); + op_.add(index, builderForValue.build()); + onChanged(); + } else { + opBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder addAllOp( + java.lang.Iterable values) { + if (opBuilder_ == null) { + ensureOpIsMutable(); + super.addAll(values, op_); + onChanged(); + } else { + opBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder clearOp() { + if (opBuilder_ == null) { + op_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + opBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public Builder removeOp(int index) { + if (opBuilder_ == null) { + ensureOpIsMutable(); + op_.remove(index); + onChanged(); + } else { + opBuilder_.remove(index); + } + return 
this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder getOpBuilder( + int index) { + return getOpFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder getOpOrBuilder( + int index) { + if (opBuilder_ == null) { + return op_.get(index); } else { + return opBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public java.util.List + getOpOrBuilderList() { + if (opBuilder_ != null) { + return opBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(op_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder() { + return getOpFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder addOpBuilder( + int index) { + return getOpFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator op = 3; + */ + public java.util.List + getOpBuilderList() { + return getOpFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder> + getOpFieldBuilder() { + if (opBuilder_ == null) { + opBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.OperatorOrBuilder>( + op_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + op_ = null; + } + return opBuilder_; + } + + // repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + private java.util.List range_ = + java.util.Collections.emptyList(); + private void ensureRangeIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + range_ = new java.util.ArrayList(range_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder> rangeBuilder_; + 
+ /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public java.util.List getRangeList() { + if (rangeBuilder_ == null) { + return java.util.Collections.unmodifiableList(range_); + } else { + return rangeBuilder_.getMessageList(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public int getRangeCount() { + if (rangeBuilder_ == null) { + return range_.size(); + } else { + return rangeBuilder_.getCount(); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range getRange(int index) { + if (rangeBuilder_ == null) { + return range_.get(index); + } else { + return rangeBuilder_.getMessage(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder setRange( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.set(index, value); + onChanged(); + } else { + rangeBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder setRange( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.set(index, builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder addRange(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(value); + onChanged(); + } else { + rangeBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder addRange( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range value) { + if (rangeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRangeIsMutable(); + range_.add(index, value); + onChanged(); + } else { + rangeBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder addRange( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder addRange( + int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder builderForValue) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.add(index, 
builderForValue.build()); + onChanged(); + } else { + rangeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder addAllRange( + java.lang.Iterable values) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + super.addAll(values, range_); + onChanged(); + } else { + rangeBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder clearRange() { + if (rangeBuilder_ == null) { + range_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + rangeBuilder_.clear(); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public Builder removeRange(int index) { + if (rangeBuilder_ == null) { + ensureRangeIsMutable(); + range_.remove(index); + onChanged(); + } else { + rangeBuilder_.remove(index); + } + return this; + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder getRangeBuilder( + int index) { + return getRangeFieldBuilder().getBuilder(index); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder getRangeOrBuilder( + int index) { + if (rangeBuilder_ == null) { + return range_.get(index); } else { + return rangeBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public java.util.List + getRangeOrBuilderList() { + if (rangeBuilder_ != null) { + return rangeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(range_); + } + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder() { + return getRangeFieldBuilder().addBuilder( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder addRangeBuilder( + int index) { + return getRangeFieldBuilder().addBuilder( + index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.getDefaultInstance()); + } + /** + * repeated .org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Range range = 4; + */ + public java.util.List + getRangeBuilderList() { + return getRangeFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder> + getRangeFieldBuilder() { + if (rangeBuilder_ == null) { + rangeBuilder_ = new 
com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Range.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.RangeOrBuilder>( + range_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + range_ = null; + } + return rangeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator) + } + + static { + defaultInstance = new PartitionKeyComparator(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor; + private 
static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\033hbase_metastore_proto.proto\022&org.apach" + + "e.hadoop.hive.metastore.hbase\"h\n\tAggrSta" + + "ts\022\023\n\013parts_found\030\001 \002(\003\022F\n\tcol_stats\030\002 \003" + + "(\01323.org.apache.hadoop.hive.metastore.hb" + + "ase.ColumnStats\"\364\001\n\024AggrStatsBloomFilter" + + "\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030\002 \002(\014\022^\n" + + 
"\014bloom_filter\030\003 \002(\0132H.org.apache.hadoop." + + "hive.metastore.hbase.AggrStatsBloomFilte" + + "r.BloomFilter\022\025\n\raggregated_at\030\004 \002(\003\032@\n\013" + + "BloomFilter\022\020\n\010num_bits\030\001 \002(\005\022\021\n\tnum_fun", + "cs\030\002 \002(\005\022\014\n\004bits\030\003 \003(\003\"\357\001\n\032AggrStatsInva" + + "lidatorFilter\022_\n\rto_invalidate\030\001 \003(\0132H.o" + + "rg.apache.hadoop.hive.metastore.hbase.Ag" + + "grStatsInvalidatorFilter.Entry\022\021\n\trun_ev" + + "ery\030\002 \002(\003\022\034\n\024max_cache_entry_life\030\003 \002(\003\032" + + "?\n\005Entry\022\017\n\007db_name\030\001 \002(\014\022\022\n\ntable_name\030" + + "\002 \002(\014\022\021\n\tpart_name\030\003 \002(\014\"\335\010\n\013ColumnStats" + + "\022\025\n\rlast_analyzed\030\001 \001(\003\022\023\n\013column_type\030\002" + + " \002(\t\022\021\n\tnum_nulls\030\003 \001(\003\022\033\n\023num_distinct_" + + "values\030\004 \001(\003\022T\n\nbool_stats\030\005 \001(\0132@.org.a", + "pache.hadoop.hive.metastore.hbase.Column" + + "Stats.BooleanStats\022Q\n\nlong_stats\030\006 \001(\0132=" + + ".org.apache.hadoop.hive.metastore.hbase." + + "ColumnStats.LongStats\022U\n\014double_stats\030\007 " + + "\001(\0132?.org.apache.hadoop.hive.metastore.h" + + "base.ColumnStats.DoubleStats\022U\n\014string_s" + + "tats\030\010 \001(\0132?.org.apache.hadoop.hive.meta" + + "store.hbase.ColumnStats.StringStats\022U\n\014b" + + "inary_stats\030\t \001(\0132?.org.apache.hadoop.hi" + + "ve.metastore.hbase.ColumnStats.StringSta", + "ts\022W\n\rdecimal_stats\030\n \001(\0132@.org.apache.h" + + "adoop.hive.metastore.hbase.ColumnStats.D" + + "ecimalStats\022\023\n\013column_name\030\013 \001(\t\0325\n\014Bool" + + "eanStats\022\021\n\tnum_trues\030\001 \001(\003\022\022\n\nnum_false" + + "s\030\002 \001(\003\0322\n\tLongStats\022\021\n\tlow_value\030\001 \001(\022\022" + + "\022\n\nhigh_value\030\002 \001(\022\0324\n\013DoubleStats\022\021\n\tlo" + + "w_value\030\001 \001(\001\022\022\n\nhigh_value\030\002 \001(\001\032=\n\013Str" + + "ingStats\022\026\n\016max_col_length\030\001 \001(\003\022\026\n\016avg_" + + "col_length\030\002 \001(\001\032\365\001\n\014DecimalStats\022[\n\tlow" + + "_value\030\001 \001(\0132H.org.apache.hadoop.hive.me", + "tastore.hbase.ColumnStats.DecimalStats.D" + + "ecimal\022\\\n\nhigh_value\030\002 \001(\0132H.org.apache." + + "hadoop.hive.metastore.hbase.ColumnStats." + + "DecimalStats.Decimal\032*\n\007Decimal\022\020\n\010unsca" + + "led\030\001 \002(\014\022\r\n\005scale\030\002 \002(\005\"\246\002\n\010Database\022\023\n" + + "\013description\030\001 \001(\t\022\013\n\003uri\030\002 \001(\t\022F\n\nparam" + + "eters\030\003 \001(\01322.org.apache.hadoop.hive.met" + + "astore.hbase.Parameters\022Q\n\nprivileges\030\004 " + + "\001(\0132=.org.apache.hadoop.hive.metastore.h" + + "base.PrincipalPrivilegeSet\022\022\n\nowner_name", + "\030\005 \001(\t\022I\n\nowner_type\030\006 \001(\01625.org.apache." 
+ + "hadoop.hive.metastore.hbase.PrincipalTyp" + + "e\"$\n\017DelegationToken\022\021\n\ttoken_str\030\001 \002(\t\"" + + ":\n\013FieldSchema\022\014\n\004name\030\001 \002(\t\022\014\n\004type\030\002 \002" + + "(\t\022\017\n\007comment\030\003 \001(\t\"\206\004\n\010Function\022\022\n\nclas" + + "s_name\030\001 \001(\t\022\022\n\nowner_name\030\002 \001(\t\022I\n\nowne" + + "r_type\030\003 \001(\01625.org.apache.hadoop.hive.me" + + "tastore.hbase.PrincipalType\022\023\n\013create_ti" + + "me\030\004 \001(\022\022T\n\rfunction_type\030\005 \001(\0162=.org.ap" + + "ache.hadoop.hive.metastore.hbase.Functio", + "n.FunctionType\022S\n\rresource_uris\030\006 \003(\0132<." + + "org.apache.hadoop.hive.metastore.hbase.F" + + "unction.ResourceUri\032\254\001\n\013ResourceUri\022`\n\rr" + + "esource_type\030\001 \002(\0162I.org.apache.hadoop.h" + + "ive.metastore.hbase.Function.ResourceUri" + + ".ResourceType\022\013\n\003uri\030\002 \002(\t\".\n\014ResourceTy" + + "pe\022\007\n\003JAR\020\001\022\010\n\004FILE\020\002\022\013\n\007ARCHIVE\020\003\"\030\n\014Fu" + + "nctionType\022\010\n\004JAVA\020\001\"\037\n\tMasterKey\022\022\n\nmas" + + "ter_key\030\001 \002(\t\",\n\016ParameterEntry\022\013\n\003key\030\001" + + " \002(\t\022\r\n\005value\030\002 \002(\t\"W\n\nParameters\022I\n\tpar", + "ameter\030\001 \003(\01326.org.apache.hadoop.hive.me" + + "tastore.hbase.ParameterEntry\"\360\001\n\tPartiti" + + "on\022\023\n\013create_time\030\001 \001(\003\022\030\n\020last_access_t" + + "ime\030\002 \001(\003\022\020\n\010location\030\003 \001(\t\022I\n\rsd_parame" + + "ters\030\004 \001(\01322.org.apache.hadoop.hive.meta" + + "store.hbase.Parameters\022\017\n\007sd_hash\030\005 \002(\014\022" + + "F\n\nparameters\030\006 \001(\01322.org.apache.hadoop." + + "hive.metastore.hbase.Parameters\"\204\001\n\032Prin" + + "cipalPrivilegeSetEntry\022\026\n\016principal_name" + + "\030\001 \002(\t\022N\n\nprivileges\030\002 \003(\0132:.org.apache.", + "hadoop.hive.metastore.hbase.PrivilegeGra" + + "ntInfo\"\275\001\n\025PrincipalPrivilegeSet\022Q\n\005user" + + "s\030\001 \003(\0132B.org.apache.hadoop.hive.metasto" + + "re.hbase.PrincipalPrivilegeSetEntry\022Q\n\005r" + + "oles\030\002 \003(\0132B.org.apache.hadoop.hive.meta" + + "store.hbase.PrincipalPrivilegeSetEntry\"\260" + + "\001\n\022PrivilegeGrantInfo\022\021\n\tprivilege\030\001 \001(\t" + + "\022\023\n\013create_time\030\002 \001(\003\022\017\n\007grantor\030\003 \001(\t\022K" + + "\n\014grantor_type\030\004 \001(\01625.org.apache.hadoop" + + ".hive.metastore.hbase.PrincipalType\022\024\n\014g", + "rant_option\030\005 \001(\010\"\374\001\n\rRoleGrantInfo\022\026\n\016p" + + "rincipal_name\030\001 \002(\t\022M\n\016principal_type\030\002 " + + "\002(\01625.org.apache.hadoop.hive.metastore.h" + + "base.PrincipalType\022\020\n\010add_time\030\003 \001(\003\022\017\n\007" + + "grantor\030\004 \001(\t\022K\n\014grantor_type\030\005 \001(\01625.or" + + "g.apache.hadoop.hive.metastore.hbase.Pri" + + "ncipalType\022\024\n\014grant_option\030\006 \001(\010\"^\n\021Role" + + "GrantInfoList\022I\n\ngrant_info\030\001 \003(\01325.org." 
+ + "apache.hadoop.hive.metastore.hbase.RoleG" + + "rantInfo\"\030\n\010RoleList\022\014\n\004role\030\001 \003(\t\"/\n\004Ro", + "le\022\023\n\013create_time\030\001 \001(\003\022\022\n\nowner_name\030\002 " + + "\001(\t\"\254\010\n\021StorageDescriptor\022A\n\004cols\030\001 \003(\0132" + + "3.org.apache.hadoop.hive.metastore.hbase" + + ".FieldSchema\022\024\n\014input_format\030\002 \001(\t\022\025\n\rou" + + "tput_format\030\003 \001(\t\022\025\n\ris_compressed\030\004 \001(\010" + + "\022\023\n\013num_buckets\030\005 \001(\021\022W\n\nserde_info\030\006 \001(" + + "\0132C.org.apache.hadoop.hive.metastore.hba" + + "se.StorageDescriptor.SerDeInfo\022\023\n\013bucket" + + "_cols\030\007 \003(\t\022R\n\tsort_cols\030\010 \003(\0132?.org.apa" + + "che.hadoop.hive.metastore.hbase.StorageD", + "escriptor.Order\022Y\n\013skewed_info\030\t \001(\0132D.o" + + "rg.apache.hadoop.hive.metastore.hbase.St" + + "orageDescriptor.SkewedInfo\022!\n\031stored_as_" + + "sub_directories\030\n \001(\010\032.\n\005Order\022\023\n\013column" + + "_name\030\001 \002(\t\022\020\n\005order\030\002 \001(\021:\0011\032|\n\tSerDeIn" + + "fo\022\014\n\004name\030\001 \001(\t\022\031\n\021serialization_lib\030\002 " + + "\001(\t\022F\n\nparameters\030\003 \001(\01322.org.apache.had" + + "oop.hive.metastore.hbase.Parameters\032\214\003\n\n" + + "SkewedInfo\022\030\n\020skewed_col_names\030\001 \003(\t\022r\n\021" + + "skewed_col_values\030\002 \003(\0132W.org.apache.had", + "oop.hive.metastore.hbase.StorageDescript" + + "or.SkewedInfo.SkewedColValueList\022\206\001\n\036ske" + + "wed_col_value_location_maps\030\003 \003(\0132^.org." + + "apache.hadoop.hive.metastore.hbase.Stora" + + "geDescriptor.SkewedInfo.SkewedColValueLo" + + "cationMap\032.\n\022SkewedColValueList\022\030\n\020skewe" + + "d_col_value\030\001 \003(\t\0327\n\031SkewedColValueLocat" + + "ionMap\022\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004\n\005T" + + "able\022\r\n\005owner\030\001 \001(\t\022\023\n\013create_time\030\002 \001(\003" + + "\022\030\n\020last_access_time\030\003 \001(\003\022\021\n\tretention\030", + "\004 \001(\003\022\020\n\010location\030\005 \001(\t\022I\n\rsd_parameters" + + "\030\006 \001(\01322.org.apache.hadoop.hive.metastor" + + "e.hbase.Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K\n\016p" + + "artition_keys\030\010 \003(\01323.org.apache.hadoop." 
+ + "hive.metastore.hbase.FieldSchema\022F\n\npara" + + "meters\030\t \001(\01322.org.apache.hadoop.hive.me" + + "tastore.hbase.Parameters\022\032\n\022view_origina" + + "l_text\030\n \001(\t\022\032\n\022view_expanded_text\030\013 \001(\t" + + "\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivileges\030\r \001(\013" + + "2=.org.apache.hadoop.hive.metastore.hbas", + "e.PrincipalPrivilegeSet\022\024\n\014is_temporary\030" + + "\016 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005name" + + "s\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G.org" + + ".apache.hadoop.hive.metastore.hbase.Part" + + "itionKeyComparator.Operator\022S\n\005range\030\004 \003" + + "(\0132D.org.apache.hadoop.hive.metastore.hb" + + "ase.PartitionKeyComparator.Range\032(\n\004Mark" + + "\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001\n\005R" + + "ange\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.org.a" + + "pache.hadoop.hive.metastore.hbase.Partit", + "ionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C.org" + + ".apache.hadoop.hive.metastore.hbase.Part" + + "itionKeyComparator.Mark\032\241\001\n\010Operator\022Z\n\004" + + "type\030\001 \002(\0162L.org.apache.hadoop.hive.meta" + + "store.hbase.PartitionKeyComparator.Opera" + + "tor.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Ty" + + "pe\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*#\n\rPrincipal" + + "Type\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor, + new java.lang.String[] { "PartsFound", "ColStats", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor, + new java.lang.String[] { "DbName", "TableName", "BloomFilter", "AggregatedAt", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor, + new java.lang.String[] { "NumBits", "NumFuncs", "Bits", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor = + getDescriptor().getMessageTypes().get(2); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor, + new java.lang.String[] { "ToInvalidate", "RunEvery", "MaxCacheEntryLife", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsInvalidatorFilter_Entry_descriptor, + new java.lang.String[] { "DbName", "TableName", "PartName", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor, + new java.lang.String[] { "LastAnalyzed", "ColumnType", "NumNulls", "NumDistinctValues", "BoolStats", "LongStats", "DoubleStats", "StringStats", "BinaryStats", "DecimalStats", "ColumnName", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_BooleanStats_descriptor, + new java.lang.String[] { "NumTrues", "NumFalses", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_LongStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DoubleStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(3); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_StringStats_descriptor, + new java.lang.String[] { "MaxColLength", "AvgColLength", }); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_descriptor.getNestedTypes().get(4); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor, + new java.lang.String[] { "LowValue", "HighValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ColumnStats_DecimalStats_Decimal_descriptor, + new java.lang.String[] { "Unscaled", "Scale", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Database_descriptor, + new java.lang.String[] { "Description", "Uri", "Parameters", "Privileges", "OwnerName", "OwnerType", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_DelegationToken_descriptor, + new java.lang.String[] { "TokenStr", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_FieldSchema_descriptor, + new java.lang.String[] { "Name", "Type", "Comment", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor, + new java.lang.String[] { "ClassName", "OwnerName", "OwnerType", "CreateTime", "FunctionType", "ResourceUris", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Function_ResourceUri_descriptor, + new java.lang.String[] { "ResourceType", "Uri", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_MasterKey_descriptor, + new java.lang.String[] { "MasterKey", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ParameterEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Parameters_descriptor, + new java.lang.String[] { "Parameter", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Partition_descriptor, + new java.lang.String[] { "CreateTime", "LastAccessTime", "Location", "SdParameters", "SdHash", "Parameters", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSetEntry_descriptor, + new java.lang.String[] { "PrincipalName", "Privileges", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrincipalPrivilegeSet_descriptor, + new java.lang.String[] { "Users", "Roles", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PrivilegeGrantInfo_descriptor, + new java.lang.String[] { "Privilege", "CreateTime", "Grantor", "GrantorType", "GrantOption", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfo_descriptor, + new java.lang.String[] { "PrincipalName", "PrincipalType", "AddTime", "Grantor", "GrantorType", "GrantOption", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor = + getDescriptor().getMessageTypes().get(16); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleGrantInfoList_descriptor, + new java.lang.String[] { "GrantInfo", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_RoleList_descriptor, + new java.lang.String[] { "Role", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Role_descriptor, + new java.lang.String[] { "CreateTime", "OwnerName", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor, + new java.lang.String[] { "Cols", "InputFormat", "OutputFormat", "IsCompressed", "NumBuckets", "SerdeInfo", "BucketCols", "SortCols", "SkewedInfo", "StoredAsSubDirectories", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor, + new java.lang.String[] { "ColumnName", "Order", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor, + new java.lang.String[] { "Name", "SerializationLib", "Parameters", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor, + new java.lang.String[] { "SkewedColNames", "SkewedColValues", "SkewedColValueLocationMaps", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(0); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueList_descriptor, + new java.lang.String[] { "SkewedColValue", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SkewedInfo_SkewedColValueLocationMap_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, + new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor = + getDescriptor().getMessageTypes().get(21); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor, + new java.lang.String[] { "Names", "Types", "Op", "Range", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(0); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Mark_descriptor, + new java.lang.String[] { "Value", "Inclusive", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(1); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Range_descriptor, + new java.lang.String[] { "Key", "Start", "End", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor = + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor.getNestedTypes().get(2); + internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_Operator_descriptor, + new java.lang.String[] { "Type", "Key", "Val", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 0354fe1..2872f85 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1235,14 +1235,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size649; - ::apache::thrift::protocol::TType _etype652; - xfer += iprot->readListBegin(_etype652, _size649); - this->success.resize(_size649); - uint32_t _i653; - for (_i653 = 0; _i653 < _size649; ++_i653) + uint32_t _size719; + ::apache::thrift::protocol::TType _etype722; + xfer += iprot->readListBegin(_etype722, _size719); + this->success.resize(_size719); + uint32_t _i723; + for (_i723 = 0; _i723 < _size719; ++_i723) { - xfer += iprot->readString(this->success[_i653]); + xfer += iprot->readString(this->success[_i723]); } xfer += iprot->readListEnd(); } @@ -1281,10 +1281,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter654; - for (_iter654 = this->success.begin(); _iter654 != this->success.end(); ++_iter654) + std::vector ::const_iterator _iter724; + for (_iter724 = this->success.begin(); _iter724 != this->success.end(); ++_iter724) { - xfer += oprot->writeString((*_iter654)); + xfer += oprot->writeString((*_iter724)); } xfer += oprot->writeListEnd(); } @@ -1328,14 +1328,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size655; - ::apache::thrift::protocol::TType _etype658; - xfer += iprot->readListBegin(_etype658, _size655); - (*(this->success)).resize(_size655); - uint32_t _i659; - for (_i659 = 0; _i659 < _size655; ++_i659) + uint32_t _size725; + ::apache::thrift::protocol::TType _etype728; + xfer += iprot->readListBegin(_etype728, _size725); + (*(this->success)).resize(_size725); + uint32_t _i729; + for (_i729 = 0; _i729 < _size725; ++_i729) { - xfer += iprot->readString((*(this->success))[_i659]); + xfer += iprot->readString((*(this->success))[_i729]); } xfer += iprot->readListEnd(); } @@ -1452,14 +1452,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size660; - ::apache::thrift::protocol::TType _etype663; - xfer += iprot->readListBegin(_etype663, _size660); - this->success.resize(_size660); - uint32_t _i664; - for (_i664 = 0; _i664 < _size660; ++_i664) + uint32_t _size730; + ::apache::thrift::protocol::TType _etype733; + xfer += iprot->readListBegin(_etype733, _size730); + this->success.resize(_size730); + uint32_t _i734; + for (_i734 = 0; _i734 < _size730; ++_i734) { - xfer += 
iprot->readString(this->success[_i664]); + xfer += iprot->readString(this->success[_i734]); } xfer += iprot->readListEnd(); } @@ -1498,10 +1498,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter665; - for (_iter665 = this->success.begin(); _iter665 != this->success.end(); ++_iter665) + std::vector ::const_iterator _iter735; + for (_iter735 = this->success.begin(); _iter735 != this->success.end(); ++_iter735) { - xfer += oprot->writeString((*_iter665)); + xfer += oprot->writeString((*_iter735)); } xfer += oprot->writeListEnd(); } @@ -1545,14 +1545,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size666; - ::apache::thrift::protocol::TType _etype669; - xfer += iprot->readListBegin(_etype669, _size666); - (*(this->success)).resize(_size666); - uint32_t _i670; - for (_i670 = 0; _i670 < _size666; ++_i670) + uint32_t _size736; + ::apache::thrift::protocol::TType _etype739; + xfer += iprot->readListBegin(_etype739, _size736); + (*(this->success)).resize(_size736); + uint32_t _i740; + for (_i740 = 0; _i740 < _size736; ++_i740) { - xfer += iprot->readString((*(this->success))[_i670]); + xfer += iprot->readString((*(this->success))[_i740]); } xfer += iprot->readListEnd(); } @@ -2610,17 +2610,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size671; - ::apache::thrift::protocol::TType _ktype672; - ::apache::thrift::protocol::TType _vtype673; - xfer += iprot->readMapBegin(_ktype672, _vtype673, _size671); - uint32_t _i675; - for (_i675 = 0; _i675 < _size671; ++_i675) + uint32_t _size741; + ::apache::thrift::protocol::TType _ktype742; + ::apache::thrift::protocol::TType _vtype743; + xfer += iprot->readMapBegin(_ktype742, _vtype743, _size741); + uint32_t _i745; + for (_i745 = 0; _i745 < _size741; ++_i745) { - std::string _key676; - xfer += iprot->readString(_key676); - Type& _val677 = this->success[_key676]; - xfer += _val677.read(iprot); + std::string _key746; + xfer += iprot->readString(_key746); + Type& _val747 = this->success[_key746]; + xfer += _val747.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2659,11 +2659,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter678; - for (_iter678 = this->success.begin(); _iter678 != this->success.end(); ++_iter678) + std::map ::const_iterator _iter748; + for (_iter748 = this->success.begin(); _iter748 != this->success.end(); ++_iter748) { - xfer += oprot->writeString(_iter678->first); - xfer += _iter678->second.write(oprot); + xfer += oprot->writeString(_iter748->first); + xfer += _iter748->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2707,17 +2707,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size679; - 
::apache::thrift::protocol::TType _ktype680; - ::apache::thrift::protocol::TType _vtype681; - xfer += iprot->readMapBegin(_ktype680, _vtype681, _size679); - uint32_t _i683; - for (_i683 = 0; _i683 < _size679; ++_i683) + uint32_t _size749; + ::apache::thrift::protocol::TType _ktype750; + ::apache::thrift::protocol::TType _vtype751; + xfer += iprot->readMapBegin(_ktype750, _vtype751, _size749); + uint32_t _i753; + for (_i753 = 0; _i753 < _size749; ++_i753) { - std::string _key684; - xfer += iprot->readString(_key684); - Type& _val685 = (*(this->success))[_key684]; - xfer += _val685.read(iprot); + std::string _key754; + xfer += iprot->readString(_key754); + Type& _val755 = (*(this->success))[_key754]; + xfer += _val755.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2871,14 +2871,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size686; - ::apache::thrift::protocol::TType _etype689; - xfer += iprot->readListBegin(_etype689, _size686); - this->success.resize(_size686); - uint32_t _i690; - for (_i690 = 0; _i690 < _size686; ++_i690) + uint32_t _size756; + ::apache::thrift::protocol::TType _etype759; + xfer += iprot->readListBegin(_etype759, _size756); + this->success.resize(_size756); + uint32_t _i760; + for (_i760 = 0; _i760 < _size756; ++_i760) { - xfer += this->success[_i690].read(iprot); + xfer += this->success[_i760].read(iprot); } xfer += iprot->readListEnd(); } @@ -2933,10 +2933,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter691; - for (_iter691 = this->success.begin(); _iter691 != this->success.end(); ++_iter691) + std::vector ::const_iterator _iter761; + for (_iter761 = this->success.begin(); _iter761 != this->success.end(); ++_iter761) { - xfer += (*_iter691).write(oprot); + xfer += (*_iter761).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2988,14 +2988,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size692; - ::apache::thrift::protocol::TType _etype695; - xfer += iprot->readListBegin(_etype695, _size692); - (*(this->success)).resize(_size692); - uint32_t _i696; - for (_i696 = 0; _i696 < _size692; ++_i696) + uint32_t _size762; + ::apache::thrift::protocol::TType _etype765; + xfer += iprot->readListBegin(_etype765, _size762); + (*(this->success)).resize(_size762); + uint32_t _i766; + for (_i766 = 0; _i766 < _size762; ++_i766) { - xfer += (*(this->success))[_i696].read(iprot); + xfer += (*(this->success))[_i766].read(iprot); } xfer += iprot->readListEnd(); } @@ -3181,14 +3181,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size697; - ::apache::thrift::protocol::TType _etype700; - xfer += iprot->readListBegin(_etype700, _size697); - this->success.resize(_size697); - uint32_t _i701; - for (_i701 = 0; _i701 < _size697; ++_i701) + uint32_t _size767; + ::apache::thrift::protocol::TType _etype770; + xfer += iprot->readListBegin(_etype770, _size767); + this->success.resize(_size767); + uint32_t _i771; + for (_i771 = 0; _i771 < _size767; ++_i771) { 
- xfer += this->success[_i701].read(iprot); + xfer += this->success[_i771].read(iprot); } xfer += iprot->readListEnd(); } @@ -3243,10 +3243,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter702; - for (_iter702 = this->success.begin(); _iter702 != this->success.end(); ++_iter702) + std::vector ::const_iterator _iter772; + for (_iter772 = this->success.begin(); _iter772 != this->success.end(); ++_iter772) { - xfer += (*_iter702).write(oprot); + xfer += (*_iter772).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3298,14 +3298,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size703; - ::apache::thrift::protocol::TType _etype706; - xfer += iprot->readListBegin(_etype706, _size703); - (*(this->success)).resize(_size703); - uint32_t _i707; - for (_i707 = 0; _i707 < _size703; ++_i707) + uint32_t _size773; + ::apache::thrift::protocol::TType _etype776; + xfer += iprot->readListBegin(_etype776, _size773); + (*(this->success)).resize(_size773); + uint32_t _i777; + for (_i777 = 0; _i777 < _size773; ++_i777) { - xfer += (*(this->success))[_i707].read(iprot); + xfer += (*(this->success))[_i777].read(iprot); } xfer += iprot->readListEnd(); } @@ -3475,14 +3475,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size708; - ::apache::thrift::protocol::TType _etype711; - xfer += iprot->readListBegin(_etype711, _size708); - this->success.resize(_size708); - uint32_t _i712; - for (_i712 = 0; _i712 < _size708; ++_i712) + uint32_t _size778; + ::apache::thrift::protocol::TType _etype781; + xfer += iprot->readListBegin(_etype781, _size778); + this->success.resize(_size778); + uint32_t _i782; + for (_i782 = 0; _i782 < _size778; ++_i782) { - xfer += this->success[_i712].read(iprot); + xfer += this->success[_i782].read(iprot); } xfer += iprot->readListEnd(); } @@ -3537,10 +3537,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter713; - for (_iter713 = this->success.begin(); _iter713 != this->success.end(); ++_iter713) + std::vector ::const_iterator _iter783; + for (_iter783 = this->success.begin(); _iter783 != this->success.end(); ++_iter783) { - xfer += (*_iter713).write(oprot); + xfer += (*_iter783).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3592,14 +3592,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size714; - ::apache::thrift::protocol::TType _etype717; - xfer += iprot->readListBegin(_etype717, _size714); - (*(this->success)).resize(_size714); - uint32_t _i718; - for (_i718 = 0; _i718 < _size714; ++_i718) + uint32_t _size784; + ::apache::thrift::protocol::TType _etype787; + xfer += iprot->readListBegin(_etype787, _size784); + (*(this->success)).resize(_size784); + uint32_t _i788; + for (_i788 = 0; _i788 < 
_size784; ++_i788) { - xfer += (*(this->success))[_i718].read(iprot); + xfer += (*(this->success))[_i788].read(iprot); } xfer += iprot->readListEnd(); } @@ -3785,14 +3785,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size719; - ::apache::thrift::protocol::TType _etype722; - xfer += iprot->readListBegin(_etype722, _size719); - this->success.resize(_size719); - uint32_t _i723; - for (_i723 = 0; _i723 < _size719; ++_i723) + uint32_t _size789; + ::apache::thrift::protocol::TType _etype792; + xfer += iprot->readListBegin(_etype792, _size789); + this->success.resize(_size789); + uint32_t _i793; + for (_i793 = 0; _i793 < _size789; ++_i793) { - xfer += this->success[_i723].read(iprot); + xfer += this->success[_i793].read(iprot); } xfer += iprot->readListEnd(); } @@ -3847,10 +3847,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter724; - for (_iter724 = this->success.begin(); _iter724 != this->success.end(); ++_iter724) + std::vector ::const_iterator _iter794; + for (_iter794 = this->success.begin(); _iter794 != this->success.end(); ++_iter794) { - xfer += (*_iter724).write(oprot); + xfer += (*_iter794).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3902,14 +3902,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size725; - ::apache::thrift::protocol::TType _etype728; - xfer += iprot->readListBegin(_etype728, _size725); - (*(this->success)).resize(_size725); - uint32_t _i729; - for (_i729 = 0; _i729 < _size725; ++_i729) + uint32_t _size795; + ::apache::thrift::protocol::TType _etype798; + xfer += iprot->readListBegin(_etype798, _size795); + (*(this->success)).resize(_size795); + uint32_t _i799; + for (_i799 = 0; _i799 < _size795; ++_i799) { - xfer += (*(this->success))[_i729].read(iprot); + xfer += (*(this->success))[_i799].read(iprot); } xfer += iprot->readListEnd(); } @@ -5079,14 +5079,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size730; - ::apache::thrift::protocol::TType _etype733; - xfer += iprot->readListBegin(_etype733, _size730); - this->success.resize(_size730); - uint32_t _i734; - for (_i734 = 0; _i734 < _size730; ++_i734) + uint32_t _size800; + ::apache::thrift::protocol::TType _etype803; + xfer += iprot->readListBegin(_etype803, _size800); + this->success.resize(_size800); + uint32_t _i804; + for (_i804 = 0; _i804 < _size800; ++_i804) { - xfer += iprot->readString(this->success[_i734]); + xfer += iprot->readString(this->success[_i804]); } xfer += iprot->readListEnd(); } @@ -5125,10 +5125,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter735; - for (_iter735 = this->success.begin(); _iter735 != this->success.end(); ++_iter735) + std::vector ::const_iterator _iter805; + for (_iter805 = 
this->success.begin(); _iter805 != this->success.end(); ++_iter805) { - xfer += oprot->writeString((*_iter735)); + xfer += oprot->writeString((*_iter805)); } xfer += oprot->writeListEnd(); } @@ -5172,14 +5172,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size736; - ::apache::thrift::protocol::TType _etype739; - xfer += iprot->readListBegin(_etype739, _size736); - (*(this->success)).resize(_size736); - uint32_t _i740; - for (_i740 = 0; _i740 < _size736; ++_i740) + uint32_t _size806; + ::apache::thrift::protocol::TType _etype809; + xfer += iprot->readListBegin(_etype809, _size806); + (*(this->success)).resize(_size806); + uint32_t _i810; + for (_i810 = 0; _i810 < _size806; ++_i810) { - xfer += iprot->readString((*(this->success))[_i740]); + xfer += iprot->readString((*(this->success))[_i810]); } xfer += iprot->readListEnd(); } @@ -5317,14 +5317,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size741; - ::apache::thrift::protocol::TType _etype744; - xfer += iprot->readListBegin(_etype744, _size741); - this->success.resize(_size741); - uint32_t _i745; - for (_i745 = 0; _i745 < _size741; ++_i745) + uint32_t _size811; + ::apache::thrift::protocol::TType _etype814; + xfer += iprot->readListBegin(_etype814, _size811); + this->success.resize(_size811); + uint32_t _i815; + for (_i815 = 0; _i815 < _size811; ++_i815) { - xfer += iprot->readString(this->success[_i745]); + xfer += iprot->readString(this->success[_i815]); } xfer += iprot->readListEnd(); } @@ -5363,10 +5363,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter746; - for (_iter746 = this->success.begin(); _iter746 != this->success.end(); ++_iter746) + std::vector ::const_iterator _iter816; + for (_iter816 = this->success.begin(); _iter816 != this->success.end(); ++_iter816) { - xfer += oprot->writeString((*_iter746)); + xfer += oprot->writeString((*_iter816)); } xfer += oprot->writeListEnd(); } @@ -5410,14 +5410,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size747; - ::apache::thrift::protocol::TType _etype750; - xfer += iprot->readListBegin(_etype750, _size747); - (*(this->success)).resize(_size747); - uint32_t _i751; - for (_i751 = 0; _i751 < _size747; ++_i751) + uint32_t _size817; + ::apache::thrift::protocol::TType _etype820; + xfer += iprot->readListBegin(_etype820, _size817); + (*(this->success)).resize(_size817); + uint32_t _i821; + for (_i821 = 0; _i821 < _size817; ++_i821) { - xfer += iprot->readString((*(this->success))[_i751]); + xfer += iprot->readString((*(this->success))[_i821]); } xfer += iprot->readListEnd(); } @@ -5725,14 +5725,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size752; - ::apache::thrift::protocol::TType _etype755; - xfer += iprot->readListBegin(_etype755, _size752); - this->tbl_names.resize(_size752); - uint32_t _i756; - for (_i756 = 0; 
_i756 < _size752; ++_i756) + uint32_t _size822; + ::apache::thrift::protocol::TType _etype825; + xfer += iprot->readListBegin(_etype825, _size822); + this->tbl_names.resize(_size822); + uint32_t _i826; + for (_i826 = 0; _i826 < _size822; ++_i826) { - xfer += iprot->readString(this->tbl_names[_i756]); + xfer += iprot->readString(this->tbl_names[_i826]); } xfer += iprot->readListEnd(); } @@ -5765,10 +5765,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter757; - for (_iter757 = this->tbl_names.begin(); _iter757 != this->tbl_names.end(); ++_iter757) + std::vector ::const_iterator _iter827; + for (_iter827 = this->tbl_names.begin(); _iter827 != this->tbl_names.end(); ++_iter827) { - xfer += oprot->writeString((*_iter757)); + xfer += oprot->writeString((*_iter827)); } xfer += oprot->writeListEnd(); } @@ -5797,10 +5797,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter758; - for (_iter758 = (*(this->tbl_names)).begin(); _iter758 != (*(this->tbl_names)).end(); ++_iter758) + std::vector ::const_iterator _iter828; + for (_iter828 = (*(this->tbl_names)).begin(); _iter828 != (*(this->tbl_names)).end(); ++_iter828) { - xfer += oprot->writeString((*_iter758)); + xfer += oprot->writeString((*_iter828)); } xfer += oprot->writeListEnd(); } @@ -5841,14 +5841,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size759; - ::apache::thrift::protocol::TType _etype762; - xfer += iprot->readListBegin(_etype762, _size759); - this->success.resize(_size759); - uint32_t _i763; - for (_i763 = 0; _i763 < _size759; ++_i763) + uint32_t _size829; + ::apache::thrift::protocol::TType _etype832; + xfer += iprot->readListBegin(_etype832, _size829); + this->success.resize(_size829); + uint32_t _i833; + for (_i833 = 0; _i833 < _size829; ++_i833) { - xfer += this->success[_i763].read(iprot); + xfer += this->success[_i833].read(iprot); } xfer += iprot->readListEnd(); } @@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector
<Table> ::const_iterator _iter764; - for (_iter764 = this->success.begin(); _iter764 != this->success.end(); ++_iter764) + std::vector<Table>
::const_iterator _iter834; + for (_iter834 = this->success.begin(); _iter834 != this->success.end(); ++_iter834) { - xfer += (*_iter764).write(oprot); + xfer += (*_iter834).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5958,14 +5958,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size765; - ::apache::thrift::protocol::TType _etype768; - xfer += iprot->readListBegin(_etype768, _size765); - (*(this->success)).resize(_size765); - uint32_t _i769; - for (_i769 = 0; _i769 < _size765; ++_i769) + uint32_t _size835; + ::apache::thrift::protocol::TType _etype838; + xfer += iprot->readListBegin(_etype838, _size835); + (*(this->success)).resize(_size835); + uint32_t _i839; + for (_i839 = 0; _i839 < _size835; ++_i839) { - xfer += (*(this->success))[_i769].read(iprot); + xfer += (*(this->success))[_i839].read(iprot); } xfer += iprot->readListEnd(); } @@ -6151,14 +6151,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size770; - ::apache::thrift::protocol::TType _etype773; - xfer += iprot->readListBegin(_etype773, _size770); - this->success.resize(_size770); - uint32_t _i774; - for (_i774 = 0; _i774 < _size770; ++_i774) + uint32_t _size840; + ::apache::thrift::protocol::TType _etype843; + xfer += iprot->readListBegin(_etype843, _size840); + this->success.resize(_size840); + uint32_t _i844; + for (_i844 = 0; _i844 < _size840; ++_i844) { - xfer += iprot->readString(this->success[_i774]); + xfer += iprot->readString(this->success[_i844]); } xfer += iprot->readListEnd(); } @@ -6213,10 +6213,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter775; - for (_iter775 = this->success.begin(); _iter775 != this->success.end(); ++_iter775) + std::vector ::const_iterator _iter845; + for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) { - xfer += oprot->writeString((*_iter775)); + xfer += oprot->writeString((*_iter845)); } xfer += oprot->writeListEnd(); } @@ -6268,14 +6268,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size776; - ::apache::thrift::protocol::TType _etype779; - xfer += iprot->readListBegin(_etype779, _size776); - (*(this->success)).resize(_size776); - uint32_t _i780; - for (_i780 = 0; _i780 < _size776; ++_i780) + uint32_t _size846; + ::apache::thrift::protocol::TType _etype849; + xfer += iprot->readListBegin(_etype849, _size846); + (*(this->success)).resize(_size846); + uint32_t _i850; + for (_i850 = 0; _i850 < _size846; ++_i850) { - xfer += iprot->readString((*(this->success))[_i780]); + xfer += iprot->readString((*(this->success))[_i850]); } xfer += iprot->readListEnd(); } @@ -7603,14 +7603,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size781; - ::apache::thrift::protocol::TType _etype784; - xfer += iprot->readListBegin(_etype784, _size781); - this->new_parts.resize(_size781); - uint32_t _i785; - 
for (_i785 = 0; _i785 < _size781; ++_i785) + uint32_t _size851; + ::apache::thrift::protocol::TType _etype854; + xfer += iprot->readListBegin(_etype854, _size851); + this->new_parts.resize(_size851); + uint32_t _i855; + for (_i855 = 0; _i855 < _size851; ++_i855) { - xfer += this->new_parts[_i785].read(iprot); + xfer += this->new_parts[_i855].read(iprot); } xfer += iprot->readListEnd(); } @@ -7639,10 +7639,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter786; - for (_iter786 = this->new_parts.begin(); _iter786 != this->new_parts.end(); ++_iter786) + std::vector ::const_iterator _iter856; + for (_iter856 = this->new_parts.begin(); _iter856 != this->new_parts.end(); ++_iter856) { - xfer += (*_iter786).write(oprot); + xfer += (*_iter856).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7667,10 +7667,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter787; - for (_iter787 = (*(this->new_parts)).begin(); _iter787 != (*(this->new_parts)).end(); ++_iter787) + std::vector ::const_iterator _iter857; + for (_iter857 = (*(this->new_parts)).begin(); _iter857 != (*(this->new_parts)).end(); ++_iter857) { - xfer += (*_iter787).write(oprot); + xfer += (*_iter857).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7877,14 +7877,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size788; - ::apache::thrift::protocol::TType _etype791; - xfer += iprot->readListBegin(_etype791, _size788); - this->new_parts.resize(_size788); - uint32_t _i792; - for (_i792 = 0; _i792 < _size788; ++_i792) + uint32_t _size858; + ::apache::thrift::protocol::TType _etype861; + xfer += iprot->readListBegin(_etype861, _size858); + this->new_parts.resize(_size858); + uint32_t _i862; + for (_i862 = 0; _i862 < _size858; ++_i862) { - xfer += this->new_parts[_i792].read(iprot); + xfer += this->new_parts[_i862].read(iprot); } xfer += iprot->readListEnd(); } @@ -7913,10 +7913,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter793; - for (_iter793 = this->new_parts.begin(); _iter793 != this->new_parts.end(); ++_iter793) + std::vector ::const_iterator _iter863; + for (_iter863 = this->new_parts.begin(); _iter863 != this->new_parts.end(); ++_iter863) { - xfer += (*_iter793).write(oprot); + xfer += (*_iter863).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7941,10 +7941,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter794; - for (_iter794 = 
(*(this->new_parts)).begin(); _iter794 != (*(this->new_parts)).end(); ++_iter794) + std::vector ::const_iterator _iter864; + for (_iter864 = (*(this->new_parts)).begin(); _iter864 != (*(this->new_parts)).end(); ++_iter864) { - xfer += (*_iter794).write(oprot); + xfer += (*_iter864).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8167,14 +8167,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size795; - ::apache::thrift::protocol::TType _etype798; - xfer += iprot->readListBegin(_etype798, _size795); - this->part_vals.resize(_size795); - uint32_t _i799; - for (_i799 = 0; _i799 < _size795; ++_i799) + uint32_t _size865; + ::apache::thrift::protocol::TType _etype868; + xfer += iprot->readListBegin(_etype868, _size865); + this->part_vals.resize(_size865); + uint32_t _i869; + for (_i869 = 0; _i869 < _size865; ++_i869) { - xfer += iprot->readString(this->part_vals[_i799]); + xfer += iprot->readString(this->part_vals[_i869]); } xfer += iprot->readListEnd(); } @@ -8211,10 +8211,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter800; - for (_iter800 = this->part_vals.begin(); _iter800 != this->part_vals.end(); ++_iter800) + std::vector ::const_iterator _iter870; + for (_iter870 = this->part_vals.begin(); _iter870 != this->part_vals.end(); ++_iter870) { - xfer += oprot->writeString((*_iter800)); + xfer += oprot->writeString((*_iter870)); } xfer += oprot->writeListEnd(); } @@ -8247,10 +8247,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter801; - for (_iter801 = (*(this->part_vals)).begin(); _iter801 != (*(this->part_vals)).end(); ++_iter801) + std::vector ::const_iterator _iter871; + for (_iter871 = (*(this->part_vals)).begin(); _iter871 != (*(this->part_vals)).end(); ++_iter871) { - xfer += oprot->writeString((*_iter801)); + xfer += oprot->writeString((*_iter871)); } xfer += oprot->writeListEnd(); } @@ -8719,14 +8719,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size802; - ::apache::thrift::protocol::TType _etype805; - xfer += iprot->readListBegin(_etype805, _size802); - this->part_vals.resize(_size802); - uint32_t _i806; - for (_i806 = 0; _i806 < _size802; ++_i806) + uint32_t _size872; + ::apache::thrift::protocol::TType _etype875; + xfer += iprot->readListBegin(_etype875, _size872); + this->part_vals.resize(_size872); + uint32_t _i876; + for (_i876 = 0; _i876 < _size872; ++_i876) { - xfer += iprot->readString(this->part_vals[_i806]); + xfer += iprot->readString(this->part_vals[_i876]); } xfer += iprot->readListEnd(); } @@ -8771,10 +8771,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - 
std::vector ::const_iterator _iter807; - for (_iter807 = this->part_vals.begin(); _iter807 != this->part_vals.end(); ++_iter807) + std::vector ::const_iterator _iter877; + for (_iter877 = this->part_vals.begin(); _iter877 != this->part_vals.end(); ++_iter877) { - xfer += oprot->writeString((*_iter807)); + xfer += oprot->writeString((*_iter877)); } xfer += oprot->writeListEnd(); } @@ -8811,10 +8811,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter808; - for (_iter808 = (*(this->part_vals)).begin(); _iter808 != (*(this->part_vals)).end(); ++_iter808) + std::vector ::const_iterator _iter878; + for (_iter878 = (*(this->part_vals)).begin(); _iter878 != (*(this->part_vals)).end(); ++_iter878) { - xfer += oprot->writeString((*_iter808)); + xfer += oprot->writeString((*_iter878)); } xfer += oprot->writeListEnd(); } @@ -9613,14 +9613,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size809; - ::apache::thrift::protocol::TType _etype812; - xfer += iprot->readListBegin(_etype812, _size809); - this->part_vals.resize(_size809); - uint32_t _i813; - for (_i813 = 0; _i813 < _size809; ++_i813) + uint32_t _size879; + ::apache::thrift::protocol::TType _etype882; + xfer += iprot->readListBegin(_etype882, _size879); + this->part_vals.resize(_size879); + uint32_t _i883; + for (_i883 = 0; _i883 < _size879; ++_i883) { - xfer += iprot->readString(this->part_vals[_i813]); + xfer += iprot->readString(this->part_vals[_i883]); } xfer += iprot->readListEnd(); } @@ -9665,10 +9665,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter814; - for (_iter814 = this->part_vals.begin(); _iter814 != this->part_vals.end(); ++_iter814) + std::vector ::const_iterator _iter884; + for (_iter884 = this->part_vals.begin(); _iter884 != this->part_vals.end(); ++_iter884) { - xfer += oprot->writeString((*_iter814)); + xfer += oprot->writeString((*_iter884)); } xfer += oprot->writeListEnd(); } @@ -9705,10 +9705,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter815; - for (_iter815 = (*(this->part_vals)).begin(); _iter815 != (*(this->part_vals)).end(); ++_iter815) + std::vector ::const_iterator _iter885; + for (_iter885 = (*(this->part_vals)).begin(); _iter885 != (*(this->part_vals)).end(); ++_iter885) { - xfer += oprot->writeString((*_iter815)); + xfer += oprot->writeString((*_iter885)); } xfer += oprot->writeListEnd(); } @@ -9915,14 +9915,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size816; - ::apache::thrift::protocol::TType _etype819; - xfer += iprot->readListBegin(_etype819, _size816); - 
this->part_vals.resize(_size816); - uint32_t _i820; - for (_i820 = 0; _i820 < _size816; ++_i820) + uint32_t _size886; + ::apache::thrift::protocol::TType _etype889; + xfer += iprot->readListBegin(_etype889, _size886); + this->part_vals.resize(_size886); + uint32_t _i890; + for (_i890 = 0; _i890 < _size886; ++_i890) { - xfer += iprot->readString(this->part_vals[_i820]); + xfer += iprot->readString(this->part_vals[_i890]); } xfer += iprot->readListEnd(); } @@ -9975,10 +9975,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter821; - for (_iter821 = this->part_vals.begin(); _iter821 != this->part_vals.end(); ++_iter821) + std::vector ::const_iterator _iter891; + for (_iter891 = this->part_vals.begin(); _iter891 != this->part_vals.end(); ++_iter891) { - xfer += oprot->writeString((*_iter821)); + xfer += oprot->writeString((*_iter891)); } xfer += oprot->writeListEnd(); } @@ -10019,10 +10019,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter822; - for (_iter822 = (*(this->part_vals)).begin(); _iter822 != (*(this->part_vals)).end(); ++_iter822) + std::vector ::const_iterator _iter892; + for (_iter892 = (*(this->part_vals)).begin(); _iter892 != (*(this->part_vals)).end(); ++_iter892) { - xfer += oprot->writeString((*_iter822)); + xfer += oprot->writeString((*_iter892)); } xfer += oprot->writeListEnd(); } @@ -11023,14 +11023,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size823; - ::apache::thrift::protocol::TType _etype826; - xfer += iprot->readListBegin(_etype826, _size823); - this->part_vals.resize(_size823); - uint32_t _i827; - for (_i827 = 0; _i827 < _size823; ++_i827) + uint32_t _size893; + ::apache::thrift::protocol::TType _etype896; + xfer += iprot->readListBegin(_etype896, _size893); + this->part_vals.resize(_size893); + uint32_t _i897; + for (_i897 = 0; _i897 < _size893; ++_i897) { - xfer += iprot->readString(this->part_vals[_i827]); + xfer += iprot->readString(this->part_vals[_i897]); } xfer += iprot->readListEnd(); } @@ -11067,10 +11067,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter828; - for (_iter828 = this->part_vals.begin(); _iter828 != this->part_vals.end(); ++_iter828) + std::vector ::const_iterator _iter898; + for (_iter898 = this->part_vals.begin(); _iter898 != this->part_vals.end(); ++_iter898) { - xfer += oprot->writeString((*_iter828)); + xfer += oprot->writeString((*_iter898)); } xfer += oprot->writeListEnd(); } @@ -11103,10 +11103,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter829; - for (_iter829 = (*(this->part_vals)).begin(); _iter829 != (*(this->part_vals)).end(); ++_iter829) + std::vector ::const_iterator _iter899; + for (_iter899 = (*(this->part_vals)).begin(); _iter899 != (*(this->part_vals)).end(); ++_iter899) { - xfer += oprot->writeString((*_iter829)); + xfer += oprot->writeString((*_iter899)); } xfer += oprot->writeListEnd(); } @@ -11293,17 +11293,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size830; - ::apache::thrift::protocol::TType _ktype831; - ::apache::thrift::protocol::TType _vtype832; - xfer += iprot->readMapBegin(_ktype831, _vtype832, _size830); - uint32_t _i834; - for (_i834 = 0; _i834 < _size830; ++_i834) + uint32_t _size900; + ::apache::thrift::protocol::TType _ktype901; + ::apache::thrift::protocol::TType _vtype902; + xfer += iprot->readMapBegin(_ktype901, _vtype902, _size900); + uint32_t _i904; + for (_i904 = 0; _i904 < _size900; ++_i904) { - std::string _key835; - xfer += iprot->readString(_key835); - std::string& _val836 = this->partitionSpecs[_key835]; - xfer += iprot->readString(_val836); + std::string _key905; + xfer += iprot->readString(_key905); + std::string& _val906 = this->partitionSpecs[_key905]; + xfer += iprot->readString(_val906); } xfer += iprot->readMapEnd(); } @@ -11364,11 +11364,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter837; - for (_iter837 = this->partitionSpecs.begin(); _iter837 != this->partitionSpecs.end(); ++_iter837) + std::map ::const_iterator _iter907; + for (_iter907 = this->partitionSpecs.begin(); _iter907 != this->partitionSpecs.end(); ++_iter907) { - xfer += oprot->writeString(_iter837->first); - xfer += oprot->writeString(_iter837->second); + xfer += oprot->writeString(_iter907->first); + xfer += oprot->writeString(_iter907->second); } xfer += oprot->writeMapEnd(); } @@ -11409,11 +11409,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter838; - for (_iter838 = (*(this->partitionSpecs)).begin(); _iter838 != (*(this->partitionSpecs)).end(); ++_iter838) + std::map ::const_iterator _iter908; + for (_iter908 = (*(this->partitionSpecs)).begin(); _iter908 != (*(this->partitionSpecs)).end(); ++_iter908) { - xfer += oprot->writeString(_iter838->first); - xfer += oprot->writeString(_iter838->second); + xfer += oprot->writeString(_iter908->first); + xfer += oprot->writeString(_iter908->second); } xfer += oprot->writeMapEnd(); } @@ -11672,14 +11672,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size839; - ::apache::thrift::protocol::TType _etype842; - xfer += iprot->readListBegin(_etype842, _size839); - 
this->part_vals.resize(_size839); - uint32_t _i843; - for (_i843 = 0; _i843 < _size839; ++_i843) + uint32_t _size909; + ::apache::thrift::protocol::TType _etype912; + xfer += iprot->readListBegin(_etype912, _size909); + this->part_vals.resize(_size909); + uint32_t _i913; + for (_i913 = 0; _i913 < _size909; ++_i913) { - xfer += iprot->readString(this->part_vals[_i843]); + xfer += iprot->readString(this->part_vals[_i913]); } xfer += iprot->readListEnd(); } @@ -11700,14 +11700,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size844; - ::apache::thrift::protocol::TType _etype847; - xfer += iprot->readListBegin(_etype847, _size844); - this->group_names.resize(_size844); - uint32_t _i848; - for (_i848 = 0; _i848 < _size844; ++_i848) + uint32_t _size914; + ::apache::thrift::protocol::TType _etype917; + xfer += iprot->readListBegin(_etype917, _size914); + this->group_names.resize(_size914); + uint32_t _i918; + for (_i918 = 0; _i918 < _size914; ++_i918) { - xfer += iprot->readString(this->group_names[_i848]); + xfer += iprot->readString(this->group_names[_i918]); } xfer += iprot->readListEnd(); } @@ -11744,10 +11744,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter849; - for (_iter849 = this->part_vals.begin(); _iter849 != this->part_vals.end(); ++_iter849) + std::vector ::const_iterator _iter919; + for (_iter919 = this->part_vals.begin(); _iter919 != this->part_vals.end(); ++_iter919) { - xfer += oprot->writeString((*_iter849)); + xfer += oprot->writeString((*_iter919)); } xfer += oprot->writeListEnd(); } @@ -11760,10 +11760,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter850; - for (_iter850 = this->group_names.begin(); _iter850 != this->group_names.end(); ++_iter850) + std::vector ::const_iterator _iter920; + for (_iter920 = this->group_names.begin(); _iter920 != this->group_names.end(); ++_iter920) { - xfer += oprot->writeString((*_iter850)); + xfer += oprot->writeString((*_iter920)); } xfer += oprot->writeListEnd(); } @@ -11796,10 +11796,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter851; - for (_iter851 = (*(this->part_vals)).begin(); _iter851 != (*(this->part_vals)).end(); ++_iter851) + std::vector ::const_iterator _iter921; + for (_iter921 = (*(this->part_vals)).begin(); _iter921 != (*(this->part_vals)).end(); ++_iter921) { - xfer += oprot->writeString((*_iter851)); + xfer += oprot->writeString((*_iter921)); } xfer += oprot->writeListEnd(); } @@ -11812,10 +11812,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter852; - for (_iter852 = (*(this->group_names)).begin(); _iter852 != (*(this->group_names)).end(); ++_iter852) + std::vector ::const_iterator _iter922; + for (_iter922 = (*(this->group_names)).begin(); _iter922 != (*(this->group_names)).end(); ++_iter922) { - xfer += oprot->writeString((*_iter852)); + xfer += oprot->writeString((*_iter922)); } xfer += oprot->writeListEnd(); } @@ -12372,14 +12372,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size853; - ::apache::thrift::protocol::TType _etype856; - xfer += iprot->readListBegin(_etype856, _size853); - this->success.resize(_size853); - uint32_t _i857; - for (_i857 = 0; _i857 < _size853; ++_i857) + uint32_t _size923; + ::apache::thrift::protocol::TType _etype926; + xfer += iprot->readListBegin(_etype926, _size923); + this->success.resize(_size923); + uint32_t _i927; + for (_i927 = 0; _i927 < _size923; ++_i927) { - xfer += this->success[_i857].read(iprot); + xfer += this->success[_i927].read(iprot); } xfer += iprot->readListEnd(); } @@ -12426,10 +12426,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter858; - for (_iter858 = this->success.begin(); _iter858 != this->success.end(); ++_iter858) + std::vector ::const_iterator _iter928; + for (_iter928 = this->success.begin(); _iter928 != this->success.end(); ++_iter928) { - xfer += (*_iter858).write(oprot); + xfer += (*_iter928).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12477,14 +12477,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size859; - ::apache::thrift::protocol::TType _etype862; - xfer += iprot->readListBegin(_etype862, _size859); - (*(this->success)).resize(_size859); - uint32_t _i863; - for (_i863 = 0; _i863 < _size859; ++_i863) + uint32_t _size929; + ::apache::thrift::protocol::TType _etype932; + xfer += iprot->readListBegin(_etype932, _size929); + (*(this->success)).resize(_size929); + uint32_t _i933; + for (_i933 = 0; _i933 < _size929; ++_i933) { - xfer += (*(this->success))[_i863].read(iprot); + xfer += (*(this->success))[_i933].read(iprot); } xfer += iprot->readListEnd(); } @@ -12582,14 +12582,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size864; - ::apache::thrift::protocol::TType _etype867; - xfer += iprot->readListBegin(_etype867, _size864); - this->group_names.resize(_size864); - uint32_t _i868; - for (_i868 = 0; _i868 < _size864; ++_i868) + uint32_t _size934; + ::apache::thrift::protocol::TType _etype937; + xfer += iprot->readListBegin(_etype937, _size934); + this->group_names.resize(_size934); + uint32_t _i938; + for (_i938 = 0; _i938 < _size934; ++_i938) { - xfer += iprot->readString(this->group_names[_i868]); + xfer += iprot->readString(this->group_names[_i938]); } xfer += iprot->readListEnd(); } @@ -12634,10 +12634,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter869; - for (_iter869 = this->group_names.begin(); _iter869 != this->group_names.end(); ++_iter869) + std::vector ::const_iterator _iter939; + for (_iter939 = this->group_names.begin(); _iter939 != this->group_names.end(); ++_iter939) { - xfer += oprot->writeString((*_iter869)); + xfer += oprot->writeString((*_iter939)); } xfer += oprot->writeListEnd(); } @@ -12678,10 +12678,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter870; - for (_iter870 = (*(this->group_names)).begin(); _iter870 != (*(this->group_names)).end(); ++_iter870) + std::vector ::const_iterator _iter940; + for (_iter940 = (*(this->group_names)).begin(); _iter940 != (*(this->group_names)).end(); ++_iter940) { - xfer += oprot->writeString((*_iter870)); + xfer += oprot->writeString((*_iter940)); } xfer += oprot->writeListEnd(); } @@ -12722,14 +12722,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size871; - ::apache::thrift::protocol::TType _etype874; - xfer += iprot->readListBegin(_etype874, _size871); - this->success.resize(_size871); - uint32_t _i875; - for (_i875 = 0; _i875 < _size871; ++_i875) + uint32_t _size941; + ::apache::thrift::protocol::TType _etype944; + xfer += iprot->readListBegin(_etype944, _size941); + this->success.resize(_size941); + uint32_t _i945; + for (_i945 = 0; _i945 < _size941; ++_i945) { - xfer += this->success[_i875].read(iprot); + xfer += this->success[_i945].read(iprot); } xfer += iprot->readListEnd(); } @@ -12776,10 +12776,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter876; - for (_iter876 = this->success.begin(); _iter876 != this->success.end(); ++_iter876) + std::vector ::const_iterator _iter946; + for (_iter946 = this->success.begin(); _iter946 != this->success.end(); ++_iter946) { - xfer += (*_iter876).write(oprot); + xfer += (*_iter946).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12827,14 +12827,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size877; - ::apache::thrift::protocol::TType _etype880; - xfer += iprot->readListBegin(_etype880, _size877); - (*(this->success)).resize(_size877); - uint32_t _i881; - for (_i881 = 0; _i881 < _size877; ++_i881) + uint32_t _size947; + ::apache::thrift::protocol::TType _etype950; + xfer += iprot->readListBegin(_etype950, _size947); + (*(this->success)).resize(_size947); + uint32_t _i951; + for (_i951 = 0; _i951 < _size947; ++_i951) { - xfer += (*(this->success))[_i881].read(iprot); + xfer += (*(this->success))[_i951].read(iprot); } xfer += iprot->readListEnd(); } @@ 
-13012,14 +13012,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size882; - ::apache::thrift::protocol::TType _etype885; - xfer += iprot->readListBegin(_etype885, _size882); - this->success.resize(_size882); - uint32_t _i886; - for (_i886 = 0; _i886 < _size882; ++_i886) + uint32_t _size952; + ::apache::thrift::protocol::TType _etype955; + xfer += iprot->readListBegin(_etype955, _size952); + this->success.resize(_size952); + uint32_t _i956; + for (_i956 = 0; _i956 < _size952; ++_i956) { - xfer += this->success[_i886].read(iprot); + xfer += this->success[_i956].read(iprot); } xfer += iprot->readListEnd(); } @@ -13066,10 +13066,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter887; - for (_iter887 = this->success.begin(); _iter887 != this->success.end(); ++_iter887) + std::vector ::const_iterator _iter957; + for (_iter957 = this->success.begin(); _iter957 != this->success.end(); ++_iter957) { - xfer += (*_iter887).write(oprot); + xfer += (*_iter957).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13117,14 +13117,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size888; - ::apache::thrift::protocol::TType _etype891; - xfer += iprot->readListBegin(_etype891, _size888); - (*(this->success)).resize(_size888); - uint32_t _i892; - for (_i892 = 0; _i892 < _size888; ++_i892) + uint32_t _size958; + ::apache::thrift::protocol::TType _etype961; + xfer += iprot->readListBegin(_etype961, _size958); + (*(this->success)).resize(_size958); + uint32_t _i962; + for (_i962 = 0; _i962 < _size958; ++_i962) { - xfer += (*(this->success))[_i892].read(iprot); + xfer += (*(this->success))[_i962].read(iprot); } xfer += iprot->readListEnd(); } @@ -13302,14 +13302,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size893; - ::apache::thrift::protocol::TType _etype896; - xfer += iprot->readListBegin(_etype896, _size893); - this->success.resize(_size893); - uint32_t _i897; - for (_i897 = 0; _i897 < _size893; ++_i897) + uint32_t _size963; + ::apache::thrift::protocol::TType _etype966; + xfer += iprot->readListBegin(_etype966, _size963); + this->success.resize(_size963); + uint32_t _i967; + for (_i967 = 0; _i967 < _size963; ++_i967) { - xfer += iprot->readString(this->success[_i897]); + xfer += iprot->readString(this->success[_i967]); } xfer += iprot->readListEnd(); } @@ -13348,10 +13348,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter898; - for (_iter898 = this->success.begin(); _iter898 != this->success.end(); ++_iter898) + std::vector ::const_iterator _iter968; + for (_iter968 = this->success.begin(); _iter968 != this->success.end(); ++_iter968) { - xfer += oprot->writeString((*_iter898)); + xfer += 
oprot->writeString((*_iter968)); } xfer += oprot->writeListEnd(); } @@ -13395,14 +13395,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size899; - ::apache::thrift::protocol::TType _etype902; - xfer += iprot->readListBegin(_etype902, _size899); - (*(this->success)).resize(_size899); - uint32_t _i903; - for (_i903 = 0; _i903 < _size899; ++_i903) + uint32_t _size969; + ::apache::thrift::protocol::TType _etype972; + xfer += iprot->readListBegin(_etype972, _size969); + (*(this->success)).resize(_size969); + uint32_t _i973; + for (_i973 = 0; _i973 < _size969; ++_i973) { - xfer += iprot->readString((*(this->success))[_i903]); + xfer += iprot->readString((*(this->success))[_i973]); } xfer += iprot->readListEnd(); } @@ -13476,14 +13476,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size904; - ::apache::thrift::protocol::TType _etype907; - xfer += iprot->readListBegin(_etype907, _size904); - this->part_vals.resize(_size904); - uint32_t _i908; - for (_i908 = 0; _i908 < _size904; ++_i908) + uint32_t _size974; + ::apache::thrift::protocol::TType _etype977; + xfer += iprot->readListBegin(_etype977, _size974); + this->part_vals.resize(_size974); + uint32_t _i978; + for (_i978 = 0; _i978 < _size974; ++_i978) { - xfer += iprot->readString(this->part_vals[_i908]); + xfer += iprot->readString(this->part_vals[_i978]); } xfer += iprot->readListEnd(); } @@ -13528,10 +13528,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter909; - for (_iter909 = this->part_vals.begin(); _iter909 != this->part_vals.end(); ++_iter909) + std::vector ::const_iterator _iter979; + for (_iter979 = this->part_vals.begin(); _iter979 != this->part_vals.end(); ++_iter979) { - xfer += oprot->writeString((*_iter909)); + xfer += oprot->writeString((*_iter979)); } xfer += oprot->writeListEnd(); } @@ -13568,10 +13568,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter910; - for (_iter910 = (*(this->part_vals)).begin(); _iter910 != (*(this->part_vals)).end(); ++_iter910) + std::vector ::const_iterator _iter980; + for (_iter980 = (*(this->part_vals)).begin(); _iter980 != (*(this->part_vals)).end(); ++_iter980) { - xfer += oprot->writeString((*_iter910)); + xfer += oprot->writeString((*_iter980)); } xfer += oprot->writeListEnd(); } @@ -13616,14 +13616,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size911; - ::apache::thrift::protocol::TType _etype914; - xfer += iprot->readListBegin(_etype914, _size911); - this->success.resize(_size911); - uint32_t _i915; - for (_i915 = 0; _i915 < _size911; ++_i915) + uint32_t _size981; + ::apache::thrift::protocol::TType _etype984; + xfer += iprot->readListBegin(_etype984, _size981); + 
this->success.resize(_size981); + uint32_t _i985; + for (_i985 = 0; _i985 < _size981; ++_i985) { - xfer += this->success[_i915].read(iprot); + xfer += this->success[_i985].read(iprot); } xfer += iprot->readListEnd(); } @@ -13670,10 +13670,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter916; - for (_iter916 = this->success.begin(); _iter916 != this->success.end(); ++_iter916) + std::vector ::const_iterator _iter986; + for (_iter986 = this->success.begin(); _iter986 != this->success.end(); ++_iter986) { - xfer += (*_iter916).write(oprot); + xfer += (*_iter986).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13721,408 +13721,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size917; - ::apache::thrift::protocol::TType _etype920; - xfer += iprot->readListBegin(_etype920, _size917); - (*(this->success)).resize(_size917); - uint32_t _i921; - for (_i921 = 0; _i921 < _size917; ++_i921) - { - xfer += (*(this->success))[_i921].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_args::~ThriftHiveMetastore_get_partitions_ps_with_auth_args() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->part_vals.clear(); - uint32_t _size922; - ::apache::thrift::protocol::TType _etype925; - xfer += iprot->readListBegin(_etype925, _size922); - this->part_vals.resize(_size922); - uint32_t _i926; - for (_i926 = 0; _i926 < _size922; ++_i926) - { - xfer += iprot->readString(this->part_vals[_i926]); - } - xfer += iprot->readListEnd(); - } - this->__isset.part_vals = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_I16) { - xfer += 
iprot->readI16(this->max_parts); - this->__isset.max_parts = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 5: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->user_name); - this->__isset.user_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 6: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->group_names.clear(); - uint32_t _size927; - ::apache::thrift::protocol::TType _etype930; - xfer += iprot->readListBegin(_etype930, _size927); - this->group_names.resize(_size927); - uint32_t _i931; - for (_i931 = 0; _i931 < _size927; ++_i931) - { - xfer += iprot->readString(this->group_names[_i931]); - } - xfer += iprot->readListEnd(); - } - this->__isset.group_names = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter932; - for (_iter932 = this->part_vals.begin(); _iter932 != this->part_vals.end(); ++_iter932) - { - xfer += oprot->writeString((*_iter932)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); - xfer += oprot->writeI16(this->max_parts); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString(this->user_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter933; - for (_iter933 = this->group_names.begin(); _iter933 != this->group_names.end(); ++_iter933) - { - xfer += oprot->writeString((*_iter933)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - oprot->decrementRecursionDepth(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::~ThriftHiveMetastore_get_partitions_ps_with_auth_pargs() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += 
oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter934; - for (_iter934 = (*(this->part_vals)).begin(); _iter934 != (*(this->part_vals)).end(); ++_iter934) - { - xfer += oprot->writeString((*_iter934)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); - xfer += oprot->writeI16((*(this->max_parts))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString((*(this->user_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter935; - for (_iter935 = (*(this->group_names)).begin(); _iter935 != (*(this->group_names)).end(); ++_iter935) - { - xfer += oprot->writeString((*_iter935)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - oprot->decrementRecursionDepth(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_result::~ThriftHiveMetastore_get_partitions_ps_with_auth_result() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size936; - ::apache::thrift::protocol::TType _etype939; - xfer += iprot->readListBegin(_etype939, _size936); - this->success.resize(_size936); - uint32_t _i940; - for (_i940 = 0; _i940 < _size936; ++_i940) - { - xfer += this->success[_i940].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - 
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter941; - for (_iter941 = this->success.begin(); _iter941 != this->success.end(); ++_iter941) - { - xfer += (*_iter941).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_partitions_ps_with_auth_presult::~ThriftHiveMetastore_get_partitions_ps_with_auth_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size942; - ::apache::thrift::protocol::TType _etype945; - xfer += iprot->readListBegin(_etype945, _size942); - (*(this->success)).resize(_size942); - uint32_t _i946; - for (_i946 = 0; _i946 < _size942; ++_i946) + uint32_t _size987; + ::apache::thrift::protocol::TType _etype990; + xfer += iprot->readListBegin(_etype990, _size987); + (*(this->success)).resize(_size987); + uint32_t _i991; + for (_i991 = 0; _i991 < _size987; ++_i991) { - xfer += (*(this->success))[_i946].read(iprot); + xfer += (*(this->success))[_i991].read(iprot); } xfer += iprot->readListEnd(); } @@ -14160,11 +13766,11 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: } -ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_args::~ThriftHiveMetastore_get_partitions_ps_with_auth_args() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14204,14 +13810,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size947; - ::apache::thrift::protocol::TType _etype950; - xfer += iprot->readListBegin(_etype950, _size947); - this->part_vals.resize(_size947); - uint32_t _i951; - for (_i951 = 0; _i951 < _size947; ++_i951) + uint32_t _size992; + ::apache::thrift::protocol::TType _etype995; + xfer += iprot->readListBegin(_etype995, _size992); + this->part_vals.resize(_size992); + uint32_t _i996; + for (_i996 = 0; _i996 < _size992; ++_i996) { - xfer += iprot->readString(this->part_vals[_i951]); + xfer += iprot->readString(this->part_vals[_i996]); } xfer += iprot->readListEnd(); } @@ -14228,6 +13834,34 @@ uint32_t 
ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->user_name); + this->__isset.user_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->group_names.clear(); + uint32_t _size997; + ::apache::thrift::protocol::TType _etype1000; + xfer += iprot->readListBegin(_etype1000, _size997); + this->group_names.resize(_size997); + uint32_t _i1001; + for (_i1001 = 0; _i1001 < _size997; ++_i1001) + { + xfer += iprot->readString(this->group_names[_i1001]); + } + xfer += iprot->readListEnd(); + } + this->__isset.group_names = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -14240,10 +13874,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -14256,10 +13890,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter952; - for (_iter952 = this->part_vals.begin(); _iter952 != this->part_vals.end(); ++_iter952) + std::vector ::const_iterator _iter1002; + for (_iter1002 = this->part_vals.begin(); _iter1002 != this->part_vals.end(); ++_iter1002) { - xfer += oprot->writeString((*_iter952)); + xfer += oprot->writeString((*_iter1002)); } xfer += oprot->writeListEnd(); } @@ -14269,6 +13903,22 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->user_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); + std::vector ::const_iterator _iter1003; + for (_iter1003 = this->group_names.begin(); _iter1003 != this->group_names.end(); ++_iter1003) + { + xfer += oprot->writeString((*_iter1003)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); oprot->decrementRecursionDepth(); @@ -14276,14 +13926,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift } -ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::~ThriftHiveMetastore_get_partitions_ps_with_auth_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -14296,10 +13946,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter953; - for (_iter953 = (*(this->part_vals)).begin(); _iter953 != (*(this->part_vals)).end(); ++_iter953) + std::vector ::const_iterator _iter1004; + for (_iter1004 = (*(this->part_vals)).begin(); _iter1004 != (*(this->part_vals)).end(); ++_iter1004) { - xfer += oprot->writeString((*_iter953)); + xfer += oprot->writeString((*_iter1004)); } xfer += oprot->writeListEnd(); } @@ -14309,6 +13959,22 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString((*(this->user_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); + std::vector ::const_iterator _iter1005; + for (_iter1005 = (*(this->group_names)).begin(); _iter1005 != (*(this->group_names)).end(); ++_iter1005) + { + xfer += oprot->writeString((*_iter1005)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); oprot->decrementRecursionDepth(); @@ -14316,11 +13982,11 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif } -ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_result::~ThriftHiveMetastore_get_partitions_ps_with_auth_result() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14344,14 +14010,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size954; - ::apache::thrift::protocol::TType _etype957; - xfer += iprot->readListBegin(_etype957, _size954); - this->success.resize(_size954); - uint32_t _i958; - for (_i958 = 0; _i958 < _size954; ++_i958) + uint32_t _size1006; + ::apache::thrift::protocol::TType _etype1009; + xfer += iprot->readListBegin(_etype1009, _size1006); + this->success.resize(_size1006); + uint32_t _i1010; + for (_i1010 = 0; _i1010 < _size1006; ++_i1010) { - xfer += 
iprot->readString(this->success[_i958]); + xfer += this->success[_i1010].read(iprot); } xfer += iprot->readListEnd(); } @@ -14388,20 +14054,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter959; - for (_iter959 = this->success.begin(); _iter959 != this->success.end(); ++_iter959) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1011; + for (_iter1011 = this->success.begin(); _iter1011 != this->success.end(); ++_iter1011) { - xfer += oprot->writeString((*_iter959)); + xfer += (*_iter1011).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14421,11 +14087,11 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri } -ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() { +ThriftHiveMetastore_get_partitions_ps_with_auth_presult::~ThriftHiveMetastore_get_partitions_ps_with_auth_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14449,14 +14115,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size960; - ::apache::thrift::protocol::TType _etype963; - xfer += iprot->readListBegin(_etype963, _size960); - (*(this->success)).resize(_size960); - uint32_t _i964; - for (_i964 = 0; _i964 < _size960; ++_i964) + uint32_t _size1012; + ::apache::thrift::protocol::TType _etype1015; + xfer += iprot->readListBegin(_etype1015, _size1012); + (*(this->success)).resize(_size1012); + uint32_t _i1016; + for (_i1016 = 0; _i1016 < _size1012; ++_i1016) { - xfer += iprot->readString((*(this->success))[_i964]); + xfer += (*(this->success))[_i1016].read(iprot); } xfer += iprot->readListEnd(); } @@ -14494,11 +14160,11 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri } -ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() { +ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14535,9 +14201,21 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif } break; case 3: - if (ftype == 
::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->filter); - this->__isset.filter = true; + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->part_vals.clear(); + uint32_t _size1017; + ::apache::thrift::protocol::TType _etype1020; + xfer += iprot->readListBegin(_etype1020, _size1017); + this->part_vals.resize(_size1017); + uint32_t _i1021; + for (_i1021 = 0; _i1021 < _size1017; ++_i1021) + { + xfer += iprot->readString(this->part_vals[_i1021]); + } + xfer += iprot->readListEnd(); + } + this->__isset.part_vals = true; } else { xfer += iprot->skip(ftype); } @@ -14562,10 +14240,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -14575,8 +14253,16 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); + std::vector ::const_iterator _iter1022; + for (_iter1022 = this->part_vals.begin(); _iter1022 != this->part_vals.end(); ++_iter1022) + { + xfer += oprot->writeString((*_iter1022)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); @@ -14590,14 +14276,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri } -ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() { +ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -14607,8 +14293,16 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); + std::vector ::const_iterator _iter1023; + for (_iter1023 = (*(this->part_vals)).begin(); _iter1023 != (*(this->part_vals)).end(); ++_iter1023) + { + xfer += oprot->writeString((*_iter1023)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); @@ -14622,11 +14316,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr } -ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() { +ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14650,14 +14344,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size965; - ::apache::thrift::protocol::TType _etype968; - xfer += iprot->readListBegin(_etype968, _size965); - this->success.resize(_size965); - uint32_t _i969; - for (_i969 = 0; _i969 < _size965; ++_i969) + uint32_t _size1024; + ::apache::thrift::protocol::TType _etype1027; + xfer += iprot->readListBegin(_etype1027, _size1024); + this->success.resize(_size1024); + uint32_t _i1028; + for (_i1028 = 0; _i1028 < _size1024; ++_i1028) { - xfer += this->success[_i969].read(iprot); + xfer += iprot->readString(this->success[_i1028]); } xfer += iprot->readListEnd(); } @@ -14694,20 +14388,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter970; - for (_iter970 = this->success.begin(); _iter970 != this->success.end(); ++_iter970) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1029; + for (_iter1029 = this->success.begin(); _iter1029 != this->success.end(); ++_iter1029) { - xfer += (*_iter970).write(oprot); + xfer += oprot->writeString((*_iter1029)); } xfer += oprot->writeListEnd(); } @@ -14727,11 +14421,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th } -ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() { +ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { 
+uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14755,14 +14449,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size971; - ::apache::thrift::protocol::TType _etype974; - xfer += iprot->readListBegin(_etype974, _size971); - (*(this->success)).resize(_size971); - uint32_t _i975; - for (_i975 = 0; _i975 < _size971; ++_i975) + uint32_t _size1030; + ::apache::thrift::protocol::TType _etype1033; + xfer += iprot->readListBegin(_etype1033, _size1030); + (*(this->success)).resize(_size1030); + uint32_t _i1034; + for (_i1034 = 0; _i1034 < _size1030; ++_i1034) { - xfer += (*(this->success))[_i975].read(iprot); + xfer += iprot->readString((*(this->success))[_i1034]); } xfer += iprot->readListEnd(); } @@ -14800,11 +14494,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th } -ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() { +ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14849,8 +14543,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif } break; case 4: - if (ftype == ::apache::thrift::protocol::T_I32) { - xfer += iprot->readI32(this->max_parts); + if (ftype == ::apache::thrift::protocol::T_I16) { + xfer += iprot->readI16(this->max_parts); this->__isset.max_parts = true; } else { xfer += iprot->skip(ftype); @@ -14868,10 +14562,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -14885,8 +14579,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); - xfer += oprot->writeI32(this->max_parts); + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -14896,14 +14590,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri } -ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() { +ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -14917,8 +14611,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); - xfer += oprot->writeI32((*(this->max_parts))); + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -14928,11 +14622,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr } -ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { +ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -14956,14 +14650,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size976; - ::apache::thrift::protocol::TType _etype979; - xfer += iprot->readListBegin(_etype979, _size976); - this->success.resize(_size976); - uint32_t _i980; - for (_i980 = 0; _i980 < _size976; ++_i980) + uint32_t _size1035; + ::apache::thrift::protocol::TType _etype1038; + xfer += iprot->readListBegin(_etype1038, _size1035); + this->success.resize(_size1035); + uint32_t _i1039; + for (_i1039 = 0; _i1039 < _size1035; ++_i1039) { - xfer += this->success[_i980].read(iprot); + xfer += this->success[_i1039].read(iprot); } xfer += iprot->readListEnd(); } @@ -15000,20 +14694,20 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter981; - for (_iter981 = this->success.begin(); _iter981 != this->success.end(); ++_iter981) + std::vector ::const_iterator _iter1040; + for (_iter1040 = this->success.begin(); _iter1040 != this->success.end(); ++_iter1040) { - xfer += 
(*_iter981).write(oprot); + xfer += (*_iter1040).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15033,11 +14727,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th } -ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { +ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -15061,14 +14755,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size982; - ::apache::thrift::protocol::TType _etype985; - xfer += iprot->readListBegin(_etype985, _size982); - (*(this->success)).resize(_size982); - uint32_t _i986; - for (_i986 = 0; _i986 < _size982; ++_i986) + uint32_t _size1041; + ::apache::thrift::protocol::TType _etype1044; + xfer += iprot->readListBegin(_etype1044, _size1041); + (*(this->success)).resize(_size1041); + uint32_t _i1045; + for (_i1045 = 0; _i1045 < _size1041; ++_i1045) { - xfer += (*(this->success))[_i986].read(iprot); + xfer += (*(this->success))[_i1045].read(iprot); } xfer += iprot->readListEnd(); } @@ -15106,11 +14800,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th } -ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -15131,9 +14825,33 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->req.read(iprot); - this->__isset.req = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->filter); + this->__isset.filter = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->max_parts); + this->__isset.max_parts = true; } else { xfer += iprot->skip(ftype); } @@ -15150,13 +14868,25 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->req.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32(this->max_parts); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -15166,17 +14896,29 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift } -ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -15186,11 +14928,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrif } -ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -15211,8 +14953,266 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrif switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t 
_size1046; + ::apache::thrift::protocol::TType _etype1049; + xfer += iprot->readListBegin(_etype1049, _size1046); + this->success.resize(_size1046); + uint32_t _i1050; + for (_i1050 = 0; _i1050 < _size1046; ++_i1050) + { + xfer += this->success[_i1050].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1051; + for (_iter1051 = this->success.begin(); _iter1051 != this->success.end(); ++_iter1051) + { + xfer += (*_iter1051).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1052; + ::apache::thrift::protocol::TType _etype1055; + xfer += iprot->readListBegin(_etype1055, _size1052); + (*(this->success)).resize(_size1052); + uint32_t _i1056; + for (_i1056 = 0; _i1056 < _size1052; ++_i1056) + { + xfer += (*(this->success))[_i1056].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + 
xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -15376,14 +15376,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size987; - ::apache::thrift::protocol::TType _etype990; - xfer += iprot->readListBegin(_etype990, _size987); - this->names.resize(_size987); - uint32_t _i991; - for (_i991 = 0; _i991 < _size987; ++_i991) + uint32_t _size1057; + ::apache::thrift::protocol::TType _etype1060; + xfer += iprot->readListBegin(_etype1060, 
_size1057); + this->names.resize(_size1057); + uint32_t _i1061; + for (_i1061 = 0; _i1061 < _size1057; ++_i1061) { - xfer += iprot->readString(this->names[_i991]); + xfer += iprot->readString(this->names[_i1061]); } xfer += iprot->readListEnd(); } @@ -15420,10 +15420,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter992; - for (_iter992 = this->names.begin(); _iter992 != this->names.end(); ++_iter992) + std::vector ::const_iterator _iter1062; + for (_iter1062 = this->names.begin(); _iter1062 != this->names.end(); ++_iter1062) { - xfer += oprot->writeString((*_iter992)); + xfer += oprot->writeString((*_iter1062)); } xfer += oprot->writeListEnd(); } @@ -15456,10 +15456,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter993; - for (_iter993 = (*(this->names)).begin(); _iter993 != (*(this->names)).end(); ++_iter993) + std::vector ::const_iterator _iter1063; + for (_iter1063 = (*(this->names)).begin(); _iter1063 != (*(this->names)).end(); ++_iter1063) { - xfer += oprot->writeString((*_iter993)); + xfer += oprot->writeString((*_iter1063)); } xfer += oprot->writeListEnd(); } @@ -15500,14 +15500,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size994; - ::apache::thrift::protocol::TType _etype997; - xfer += iprot->readListBegin(_etype997, _size994); - this->success.resize(_size994); - uint32_t _i998; - for (_i998 = 0; _i998 < _size994; ++_i998) + uint32_t _size1064; + ::apache::thrift::protocol::TType _etype1067; + xfer += iprot->readListBegin(_etype1067, _size1064); + this->success.resize(_size1064); + uint32_t _i1068; + for (_i1068 = 0; _i1068 < _size1064; ++_i1068) { - xfer += this->success[_i998].read(iprot); + xfer += this->success[_i1068].read(iprot); } xfer += iprot->readListEnd(); } @@ -15554,10 +15554,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter999; - for (_iter999 = this->success.begin(); _iter999 != this->success.end(); ++_iter999) + std::vector ::const_iterator _iter1069; + for (_iter1069 = this->success.begin(); _iter1069 != this->success.end(); ++_iter1069) { - xfer += (*_iter999).write(oprot); + xfer += (*_iter1069).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15605,14 +15605,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1000; - ::apache::thrift::protocol::TType _etype1003; - xfer += iprot->readListBegin(_etype1003, _size1000); - (*(this->success)).resize(_size1000); - uint32_t _i1004; - for (_i1004 = 0; _i1004 < _size1000; ++_i1004) + uint32_t _size1070; + ::apache::thrift::protocol::TType _etype1073; + xfer += iprot->readListBegin(_etype1073, 
_size1070); + (*(this->success)).resize(_size1070); + uint32_t _i1074; + for (_i1074 = 0; _i1074 < _size1070; ++_i1074) { - xfer += (*(this->success))[_i1004].read(iprot); + xfer += (*(this->success))[_i1074].read(iprot); } xfer += iprot->readListEnd(); } @@ -15932,14 +15932,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1005; - ::apache::thrift::protocol::TType _etype1008; - xfer += iprot->readListBegin(_etype1008, _size1005); - this->new_parts.resize(_size1005); - uint32_t _i1009; - for (_i1009 = 0; _i1009 < _size1005; ++_i1009) + uint32_t _size1075; + ::apache::thrift::protocol::TType _etype1078; + xfer += iprot->readListBegin(_etype1078, _size1075); + this->new_parts.resize(_size1075); + uint32_t _i1079; + for (_i1079 = 0; _i1079 < _size1075; ++_i1079) { - xfer += this->new_parts[_i1009].read(iprot); + xfer += this->new_parts[_i1079].read(iprot); } xfer += iprot->readListEnd(); } @@ -15976,10 +15976,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1010; - for (_iter1010 = this->new_parts.begin(); _iter1010 != this->new_parts.end(); ++_iter1010) + std::vector ::const_iterator _iter1080; + for (_iter1080 = this->new_parts.begin(); _iter1080 != this->new_parts.end(); ++_iter1080) { - xfer += (*_iter1010).write(oprot); + xfer += (*_iter1080).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16012,10 +16012,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1011; - for (_iter1011 = (*(this->new_parts)).begin(); _iter1011 != (*(this->new_parts)).end(); ++_iter1011) + std::vector ::const_iterator _iter1081; + for (_iter1081 = (*(this->new_parts)).begin(); _iter1081 != (*(this->new_parts)).end(); ++_iter1081) { - xfer += (*_iter1011).write(oprot); + xfer += (*_iter1081).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16452,14 +16452,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1012; - ::apache::thrift::protocol::TType _etype1015; - xfer += iprot->readListBegin(_etype1015, _size1012); - this->part_vals.resize(_size1012); - uint32_t _i1016; - for (_i1016 = 0; _i1016 < _size1012; ++_i1016) + uint32_t _size1082; + ::apache::thrift::protocol::TType _etype1085; + xfer += iprot->readListBegin(_etype1085, _size1082); + this->part_vals.resize(_size1082); + uint32_t _i1086; + for (_i1086 = 0; _i1086 < _size1082; ++_i1086) { - xfer += iprot->readString(this->part_vals[_i1016]); + xfer += iprot->readString(this->part_vals[_i1086]); } xfer += iprot->readListEnd(); } @@ -16504,10 +16504,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1017; - for 
(_iter1017 = this->part_vals.begin(); _iter1017 != this->part_vals.end(); ++_iter1017) + std::vector ::const_iterator _iter1087; + for (_iter1087 = this->part_vals.begin(); _iter1087 != this->part_vals.end(); ++_iter1087) { - xfer += oprot->writeString((*_iter1017)); + xfer += oprot->writeString((*_iter1087)); } xfer += oprot->writeListEnd(); } @@ -16544,10 +16544,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1018; - for (_iter1018 = (*(this->part_vals)).begin(); _iter1018 != (*(this->part_vals)).end(); ++_iter1018) + std::vector ::const_iterator _iter1088; + for (_iter1088 = (*(this->part_vals)).begin(); _iter1088 != (*(this->part_vals)).end(); ++_iter1088) { - xfer += oprot->writeString((*_iter1018)); + xfer += oprot->writeString((*_iter1088)); } xfer += oprot->writeListEnd(); } @@ -16718,14 +16718,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1019; - ::apache::thrift::protocol::TType _etype1022; - xfer += iprot->readListBegin(_etype1022, _size1019); - this->part_vals.resize(_size1019); - uint32_t _i1023; - for (_i1023 = 0; _i1023 < _size1019; ++_i1023) + uint32_t _size1089; + ::apache::thrift::protocol::TType _etype1092; + xfer += iprot->readListBegin(_etype1092, _size1089); + this->part_vals.resize(_size1089); + uint32_t _i1093; + for (_i1093 = 0; _i1093 < _size1089; ++_i1093) { - xfer += iprot->readString(this->part_vals[_i1023]); + xfer += iprot->readString(this->part_vals[_i1093]); } xfer += iprot->readListEnd(); } @@ -16762,10 +16762,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1024; - for (_iter1024 = this->part_vals.begin(); _iter1024 != this->part_vals.end(); ++_iter1024) + std::vector ::const_iterator _iter1094; + for (_iter1094 = this->part_vals.begin(); _iter1094 != this->part_vals.end(); ++_iter1094) { - xfer += oprot->writeString((*_iter1024)); + xfer += oprot->writeString((*_iter1094)); } xfer += oprot->writeListEnd(); } @@ -16794,10 +16794,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1025; - for (_iter1025 = (*(this->part_vals)).begin(); _iter1025 != (*(this->part_vals)).end(); ++_iter1025) + std::vector ::const_iterator _iter1095; + for (_iter1095 = (*(this->part_vals)).begin(); _iter1095 != (*(this->part_vals)).end(); ++_iter1095) { - xfer += oprot->writeString((*_iter1025)); + xfer += oprot->writeString((*_iter1095)); } xfer += oprot->writeListEnd(); } @@ -17270,14 +17270,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1026; - ::apache::thrift::protocol::TType _etype1029; - xfer += 
iprot->readListBegin(_etype1029, _size1026); - this->success.resize(_size1026); - uint32_t _i1030; - for (_i1030 = 0; _i1030 < _size1026; ++_i1030) + uint32_t _size1096; + ::apache::thrift::protocol::TType _etype1099; + xfer += iprot->readListBegin(_etype1099, _size1096); + this->success.resize(_size1096); + uint32_t _i1100; + for (_i1100 = 0; _i1100 < _size1096; ++_i1100) { - xfer += iprot->readString(this->success[_i1030]); + xfer += iprot->readString(this->success[_i1100]); } xfer += iprot->readListEnd(); } @@ -17316,10 +17316,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1031; - for (_iter1031 = this->success.begin(); _iter1031 != this->success.end(); ++_iter1031) + std::vector ::const_iterator _iter1101; + for (_iter1101 = this->success.begin(); _iter1101 != this->success.end(); ++_iter1101) { - xfer += oprot->writeString((*_iter1031)); + xfer += oprot->writeString((*_iter1101)); } xfer += oprot->writeListEnd(); } @@ -17363,14 +17363,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1032; - ::apache::thrift::protocol::TType _etype1035; - xfer += iprot->readListBegin(_etype1035, _size1032); - (*(this->success)).resize(_size1032); - uint32_t _i1036; - for (_i1036 = 0; _i1036 < _size1032; ++_i1036) + uint32_t _size1102; + ::apache::thrift::protocol::TType _etype1105; + xfer += iprot->readListBegin(_etype1105, _size1102); + (*(this->success)).resize(_size1102); + uint32_t _i1106; + for (_i1106 = 0; _i1106 < _size1102; ++_i1106) { - xfer += iprot->readString((*(this->success))[_i1036]); + xfer += iprot->readString((*(this->success))[_i1106]); } xfer += iprot->readListEnd(); } @@ -17508,17 +17508,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1037; - ::apache::thrift::protocol::TType _ktype1038; - ::apache::thrift::protocol::TType _vtype1039; - xfer += iprot->readMapBegin(_ktype1038, _vtype1039, _size1037); - uint32_t _i1041; - for (_i1041 = 0; _i1041 < _size1037; ++_i1041) + uint32_t _size1107; + ::apache::thrift::protocol::TType _ktype1108; + ::apache::thrift::protocol::TType _vtype1109; + xfer += iprot->readMapBegin(_ktype1108, _vtype1109, _size1107); + uint32_t _i1111; + for (_i1111 = 0; _i1111 < _size1107; ++_i1111) { - std::string _key1042; - xfer += iprot->readString(_key1042); - std::string& _val1043 = this->success[_key1042]; - xfer += iprot->readString(_val1043); + std::string _key1112; + xfer += iprot->readString(_key1112); + std::string& _val1113 = this->success[_key1112]; + xfer += iprot->readString(_val1113); } xfer += iprot->readMapEnd(); } @@ -17557,11 +17557,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1044; - for (_iter1044 = this->success.begin(); _iter1044 != this->success.end(); ++_iter1044) + std::map ::const_iterator _iter1114; + for (_iter1114 = 
this->success.begin(); _iter1114 != this->success.end(); ++_iter1114) { - xfer += oprot->writeString(_iter1044->first); - xfer += oprot->writeString(_iter1044->second); + xfer += oprot->writeString(_iter1114->first); + xfer += oprot->writeString(_iter1114->second); } xfer += oprot->writeMapEnd(); } @@ -17605,17 +17605,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1045; - ::apache::thrift::protocol::TType _ktype1046; - ::apache::thrift::protocol::TType _vtype1047; - xfer += iprot->readMapBegin(_ktype1046, _vtype1047, _size1045); - uint32_t _i1049; - for (_i1049 = 0; _i1049 < _size1045; ++_i1049) + uint32_t _size1115; + ::apache::thrift::protocol::TType _ktype1116; + ::apache::thrift::protocol::TType _vtype1117; + xfer += iprot->readMapBegin(_ktype1116, _vtype1117, _size1115); + uint32_t _i1119; + for (_i1119 = 0; _i1119 < _size1115; ++_i1119) { - std::string _key1050; - xfer += iprot->readString(_key1050); - std::string& _val1051 = (*(this->success))[_key1050]; - xfer += iprot->readString(_val1051); + std::string _key1120; + xfer += iprot->readString(_key1120); + std::string& _val1121 = (*(this->success))[_key1120]; + xfer += iprot->readString(_val1121); } xfer += iprot->readMapEnd(); } @@ -17689,17 +17689,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1052; - ::apache::thrift::protocol::TType _ktype1053; - ::apache::thrift::protocol::TType _vtype1054; - xfer += iprot->readMapBegin(_ktype1053, _vtype1054, _size1052); - uint32_t _i1056; - for (_i1056 = 0; _i1056 < _size1052; ++_i1056) + uint32_t _size1122; + ::apache::thrift::protocol::TType _ktype1123; + ::apache::thrift::protocol::TType _vtype1124; + xfer += iprot->readMapBegin(_ktype1123, _vtype1124, _size1122); + uint32_t _i1126; + for (_i1126 = 0; _i1126 < _size1122; ++_i1126) { - std::string _key1057; - xfer += iprot->readString(_key1057); - std::string& _val1058 = this->part_vals[_key1057]; - xfer += iprot->readString(_val1058); + std::string _key1127; + xfer += iprot->readString(_key1127); + std::string& _val1128 = this->part_vals[_key1127]; + xfer += iprot->readString(_val1128); } xfer += iprot->readMapEnd(); } @@ -17710,9 +17710,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1059; - xfer += iprot->readI32(ecast1059); - this->eventType = (PartitionEventType::type)ecast1059; + int32_t ecast1129; + xfer += iprot->readI32(ecast1129); + this->eventType = (PartitionEventType::type)ecast1129; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -17746,11 +17746,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1060; - for (_iter1060 = this->part_vals.begin(); _iter1060 != this->part_vals.end(); ++_iter1060) + std::map ::const_iterator _iter1130; + for (_iter1130 = this->part_vals.begin(); _iter1130 != this->part_vals.end(); ++_iter1130) { - xfer += oprot->writeString(_iter1060->first); - xfer += oprot->writeString(_iter1060->second); + xfer += 
oprot->writeString(_iter1130->first); + xfer += oprot->writeString(_iter1130->second); } xfer += oprot->writeMapEnd(); } @@ -17787,11 +17787,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1061; - for (_iter1061 = (*(this->part_vals)).begin(); _iter1061 != (*(this->part_vals)).end(); ++_iter1061) + std::map ::const_iterator _iter1131; + for (_iter1131 = (*(this->part_vals)).begin(); _iter1131 != (*(this->part_vals)).end(); ++_iter1131) { - xfer += oprot->writeString(_iter1061->first); - xfer += oprot->writeString(_iter1061->second); + xfer += oprot->writeString(_iter1131->first); + xfer += oprot->writeString(_iter1131->second); } xfer += oprot->writeMapEnd(); } @@ -18058,17 +18058,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1062; - ::apache::thrift::protocol::TType _ktype1063; - ::apache::thrift::protocol::TType _vtype1064; - xfer += iprot->readMapBegin(_ktype1063, _vtype1064, _size1062); - uint32_t _i1066; - for (_i1066 = 0; _i1066 < _size1062; ++_i1066) + uint32_t _size1132; + ::apache::thrift::protocol::TType _ktype1133; + ::apache::thrift::protocol::TType _vtype1134; + xfer += iprot->readMapBegin(_ktype1133, _vtype1134, _size1132); + uint32_t _i1136; + for (_i1136 = 0; _i1136 < _size1132; ++_i1136) { - std::string _key1067; - xfer += iprot->readString(_key1067); - std::string& _val1068 = this->part_vals[_key1067]; - xfer += iprot->readString(_val1068); + std::string _key1137; + xfer += iprot->readString(_key1137); + std::string& _val1138 = this->part_vals[_key1137]; + xfer += iprot->readString(_val1138); } xfer += iprot->readMapEnd(); } @@ -18079,9 +18079,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1069; - xfer += iprot->readI32(ecast1069); - this->eventType = (PartitionEventType::type)ecast1069; + int32_t ecast1139; + xfer += iprot->readI32(ecast1139); + this->eventType = (PartitionEventType::type)ecast1139; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -18115,11 +18115,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1070; - for (_iter1070 = this->part_vals.begin(); _iter1070 != this->part_vals.end(); ++_iter1070) + std::map ::const_iterator _iter1140; + for (_iter1140 = this->part_vals.begin(); _iter1140 != this->part_vals.end(); ++_iter1140) { - xfer += oprot->writeString(_iter1070->first); - xfer += oprot->writeString(_iter1070->second); + xfer += oprot->writeString(_iter1140->first); + xfer += oprot->writeString(_iter1140->second); } xfer += oprot->writeMapEnd(); } @@ -18156,11 +18156,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += 
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1071; - for (_iter1071 = (*(this->part_vals)).begin(); _iter1071 != (*(this->part_vals)).end(); ++_iter1071) + std::map ::const_iterator _iter1141; + for (_iter1141 = (*(this->part_vals)).begin(); _iter1141 != (*(this->part_vals)).end(); ++_iter1141) { - xfer += oprot->writeString(_iter1071->first); - xfer += oprot->writeString(_iter1071->second); + xfer += oprot->writeString(_iter1141->first); + xfer += oprot->writeString(_iter1141->second); } xfer += oprot->writeMapEnd(); } @@ -19591,14 +19591,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1072; - ::apache::thrift::protocol::TType _etype1075; - xfer += iprot->readListBegin(_etype1075, _size1072); - this->success.resize(_size1072); - uint32_t _i1076; - for (_i1076 = 0; _i1076 < _size1072; ++_i1076) + uint32_t _size1142; + ::apache::thrift::protocol::TType _etype1145; + xfer += iprot->readListBegin(_etype1145, _size1142); + this->success.resize(_size1142); + uint32_t _i1146; + for (_i1146 = 0; _i1146 < _size1142; ++_i1146) { - xfer += this->success[_i1076].read(iprot); + xfer += this->success[_i1146].read(iprot); } xfer += iprot->readListEnd(); } @@ -19645,10 +19645,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1077; - for (_iter1077 = this->success.begin(); _iter1077 != this->success.end(); ++_iter1077) + std::vector ::const_iterator _iter1147; + for (_iter1147 = this->success.begin(); _iter1147 != this->success.end(); ++_iter1147) { - xfer += (*_iter1077).write(oprot); + xfer += (*_iter1147).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19696,14 +19696,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1078; - ::apache::thrift::protocol::TType _etype1081; - xfer += iprot->readListBegin(_etype1081, _size1078); - (*(this->success)).resize(_size1078); - uint32_t _i1082; - for (_i1082 = 0; _i1082 < _size1078; ++_i1082) + uint32_t _size1148; + ::apache::thrift::protocol::TType _etype1151; + xfer += iprot->readListBegin(_etype1151, _size1148); + (*(this->success)).resize(_size1148); + uint32_t _i1152; + for (_i1152 = 0; _i1152 < _size1148; ++_i1152) { - xfer += (*(this->success))[_i1082].read(iprot); + xfer += (*(this->success))[_i1152].read(iprot); } xfer += iprot->readListEnd(); } @@ -19881,14 +19881,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1083; - ::apache::thrift::protocol::TType _etype1086; - xfer += iprot->readListBegin(_etype1086, _size1083); - this->success.resize(_size1083); - uint32_t _i1087; - for (_i1087 = 0; _i1087 < _size1083; ++_i1087) + uint32_t _size1153; + ::apache::thrift::protocol::TType _etype1156; + xfer += iprot->readListBegin(_etype1156, _size1153); + this->success.resize(_size1153); + uint32_t _i1157; + for (_i1157 = 0; _i1157 < _size1153; ++_i1157) { - xfer += 
iprot->readString(this->success[_i1087]); + xfer += iprot->readString(this->success[_i1157]); } xfer += iprot->readListEnd(); } @@ -19927,10 +19927,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1088; - for (_iter1088 = this->success.begin(); _iter1088 != this->success.end(); ++_iter1088) + std::vector ::const_iterator _iter1158; + for (_iter1158 = this->success.begin(); _iter1158 != this->success.end(); ++_iter1158) { - xfer += oprot->writeString((*_iter1088)); + xfer += oprot->writeString((*_iter1158)); } xfer += oprot->writeListEnd(); } @@ -19974,14 +19974,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1089; - ::apache::thrift::protocol::TType _etype1092; - xfer += iprot->readListBegin(_etype1092, _size1089); - (*(this->success)).resize(_size1089); - uint32_t _i1093; - for (_i1093 = 0; _i1093 < _size1089; ++_i1093) + uint32_t _size1159; + ::apache::thrift::protocol::TType _etype1162; + xfer += iprot->readListBegin(_etype1162, _size1159); + (*(this->success)).resize(_size1159); + uint32_t _i1163; + for (_i1163 = 0; _i1163 < _size1159; ++_i1163) { - xfer += iprot->readString((*(this->success))[_i1093]); + xfer += iprot->readString((*(this->success))[_i1163]); } xfer += iprot->readListEnd(); } @@ -23541,14 +23541,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1094; - ::apache::thrift::protocol::TType _etype1097; - xfer += iprot->readListBegin(_etype1097, _size1094); - this->success.resize(_size1094); - uint32_t _i1098; - for (_i1098 = 0; _i1098 < _size1094; ++_i1098) + uint32_t _size1164; + ::apache::thrift::protocol::TType _etype1167; + xfer += iprot->readListBegin(_etype1167, _size1164); + this->success.resize(_size1164); + uint32_t _i1168; + for (_i1168 = 0; _i1168 < _size1164; ++_i1168) { - xfer += iprot->readString(this->success[_i1098]); + xfer += iprot->readString(this->success[_i1168]); } xfer += iprot->readListEnd(); } @@ -23587,10 +23587,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1099; - for (_iter1099 = this->success.begin(); _iter1099 != this->success.end(); ++_iter1099) + std::vector ::const_iterator _iter1169; + for (_iter1169 = this->success.begin(); _iter1169 != this->success.end(); ++_iter1169) { - xfer += oprot->writeString((*_iter1099)); + xfer += oprot->writeString((*_iter1169)); } xfer += oprot->writeListEnd(); } @@ -23634,14 +23634,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1100; - ::apache::thrift::protocol::TType _etype1103; - xfer += iprot->readListBegin(_etype1103, _size1100); - (*(this->success)).resize(_size1100); - uint32_t _i1104; - for (_i1104 = 0; _i1104 < _size1100; ++_i1104) + uint32_t _size1170; + ::apache::thrift::protocol::TType 
_etype1173; + xfer += iprot->readListBegin(_etype1173, _size1170); + (*(this->success)).resize(_size1170); + uint32_t _i1174; + for (_i1174 = 0; _i1174 < _size1170; ++_i1174) { - xfer += iprot->readString((*(this->success))[_i1104]); + xfer += iprot->readString((*(this->success))[_i1174]); } xfer += iprot->readListEnd(); } @@ -23792,8 +23792,822 @@ uint32_t ThriftHiveMetastore_get_function_result::read(::apache::thrift::protoco switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_function_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_function_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_function_presult::~ThriftHiveMetastore_get_function_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_function_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_all_functions_args::~ThriftHiveMetastore_get_all_functions_args() throw() { +} + + +uint32_t 
ThriftHiveMetastore_get_all_functions_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_all_functions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_all_functions_pargs::~ThriftHiveMetastore_get_all_functions_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_functions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_all_functions_result::~ThriftHiveMetastore_get_all_functions_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_functions_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_all_functions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_all_functions_presult::~ThriftHiveMetastore_get_all_functions_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_all_functions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t 
fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_create_role_args::~ThriftHiveMetastore_create_role_args() throw() { +} + + +uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->role.read(iprot); + this->__isset.role = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_args"); + + xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->role.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_create_role_pargs::~ThriftHiveMetastore_create_role_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_create_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_pargs"); + + xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->role)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_create_role_result::~ThriftHiveMetastore_create_role_result() throw() { +} + + +uint32_t ThriftHiveMetastore_create_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer 
+= iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_role_presult::~ThriftHiveMetastore_create_role_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_create_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_drop_role_args::~ThriftHiveMetastore_drop_role_args() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->role_name); + this->__isset.role_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_args"); + + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->role_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + 
oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_drop_role_pargs::~ThriftHiveMetastore_drop_role_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_pargs"); + + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->role_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_drop_role_result::~ThriftHiveMetastore_drop_role_result() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_drop_role_presult::~ThriftHiveMetastore_drop_role_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + 
+ThriftHiveMetastore_get_role_names_args::~ThriftHiveMetastore_get_role_names_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_role_names_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_role_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_role_names_pargs::~ThriftHiveMetastore_get_role_names_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_role_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_get_role_names_result::~ThriftHiveMetastore_get_role_names_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1175; + ::apache::thrift::protocol::TType _etype1178; + xfer += iprot->readListBegin(_etype1178, _size1175); + this->success.resize(_size1175); + uint32_t _i1179; + for (_i1179 = 0; _i1179 < _size1175; ++_i1179) + { + xfer += iprot->readString(this->success[_i1179]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -23807,14 +24621,6 @@ uint32_t ThriftHiveMetastore_get_function_result::read(::apache::thrift::protoco xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -23827,24 +24633,28 @@ uint32_t ThriftHiveMetastore_get_function_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_function_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_function_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1180; + for (_iter1180 = this->success.begin(); _iter1180 != this->success.end(); ++_iter1180) + { + xfer += oprot->writeString((*_iter1180)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -23852,11 +24662,11 @@ uint32_t ThriftHiveMetastore_get_function_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_function_presult::~ThriftHiveMetastore_get_function_presult() throw() { +ThriftHiveMetastore_get_role_names_presult::~ThriftHiveMetastore_get_role_names_presult() throw() { } -uint32_t ThriftHiveMetastore_get_function_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -23877,8 +24687,20 @@ uint32_t ThriftHiveMetastore_get_function_presult::read(::apache::thrift::protoc switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1181; + ::apache::thrift::protocol::TType _etype1184; + xfer += iprot->readListBegin(_etype1184, _size1181); + (*(this->success)).resize(_size1181); + uint32_t _i1185; + for (_i1185 = 0; _i1185 < _size1181; ++_i1185) + { + xfer += iprot->readString((*(this->success))[_i1185]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -23892,14 +24714,6 @@ uint32_t ThriftHiveMetastore_get_function_presult::read(::apache::thrift::protoc xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -23913,11 +24727,11 @@ uint32_t ThriftHiveMetastore_get_function_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_get_all_functions_args::~ThriftHiveMetastore_get_all_functions_args() throw() { +ThriftHiveMetastore_grant_role_args::~ThriftHiveMetastore_grant_role_args() throw() { } -uint32_t ThriftHiveMetastore_get_all_functions_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -23935,7 +24749,64 @@ uint32_t ThriftHiveMetastore_get_all_functions_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->role_name); + this->__isset.role_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: 
+ if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->principal_name); + this->__isset.principal_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast1186; + xfer += iprot->readI32(ecast1186); + this->principal_type = (PrincipalType::type)ecast1186; + this->__isset.principal_type = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->grantor); + this->__isset.grantor = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast1187; + xfer += iprot->readI32(ecast1187); + this->grantorType = (PrincipalType::type)ecast1187; + this->__isset.grantorType = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->grant_option); + this->__isset.grant_option = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -23944,10 +24815,34 @@ uint32_t ThriftHiveMetastore_get_all_functions_args::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_get_all_functions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_args"); + + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->role_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->principal_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32((int32_t)this->principal_type); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->grantor); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32((int32_t)this->grantorType); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeBool(this->grant_option); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -23956,14 +24851,38 @@ uint32_t ThriftHiveMetastore_get_all_functions_args::write(::apache::thrift::pro } -ThriftHiveMetastore_get_all_functions_pargs::~ThriftHiveMetastore_get_all_functions_pargs() throw() { +ThriftHiveMetastore_grant_role_pargs::~ThriftHiveMetastore_grant_role_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_all_functions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_pargs"); + xfer 
+= oprot->writeStructBegin("ThriftHiveMetastore_grant_role_pargs"); + + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->role_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->principal_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32((int32_t)(*(this->principal_type))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString((*(this->grantor))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32((int32_t)(*(this->grantorType))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeBool((*(this->grant_option))); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -23972,11 +24891,11 @@ uint32_t ThriftHiveMetastore_get_all_functions_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_get_all_functions_result::~ThriftHiveMetastore_get_all_functions_result() throw() { +ThriftHiveMetastore_grant_role_result::~ThriftHiveMetastore_grant_role_result() throw() { } -uint32_t ThriftHiveMetastore_get_all_functions_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -23997,8 +24916,8 @@ uint32_t ThriftHiveMetastore_get_all_functions_result::read(::apache::thrift::pr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24024,15 +24943,15 @@ uint32_t ThriftHiveMetastore_get_all_functions_result::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_get_all_functions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_functions_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -24045,11 +24964,11 @@ uint32_t ThriftHiveMetastore_get_all_functions_result::write(::apache::thrift::p } -ThriftHiveMetastore_get_all_functions_presult::~ThriftHiveMetastore_get_all_functions_presult() throw() { +ThriftHiveMetastore_grant_role_presult::~ThriftHiveMetastore_grant_role_presult() throw() { } -uint32_t ThriftHiveMetastore_get_all_functions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_grant_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24070,8 +24989,8 @@ uint32_t ThriftHiveMetastore_get_all_functions_presult::read(::apache::thrift::p switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24098,11 +25017,11 @@ uint32_t ThriftHiveMetastore_get_all_functions_presult::read(::apache::thrift::p } -ThriftHiveMetastore_create_role_args::~ThriftHiveMetastore_create_role_args() throw() { +ThriftHiveMetastore_revoke_role_args::~ThriftHiveMetastore_revoke_role_args() throw() { } -uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24123,9 +25042,27 @@ uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol:: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->role.read(iprot); - this->__isset.role = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->role_name); + this->__isset.role_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->principal_name); + this->__isset.principal_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast1188; + xfer += iprot->readI32(ecast1188); + this->principal_type = (PrincipalType::type)ecast1188; + this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); } @@ -24142,13 +25079,21 @@ uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_create_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_args"); - xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->role.write(oprot); + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->role_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->principal_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24158,17 +25103,25 @@ uint32_t ThriftHiveMetastore_create_role_args::write(::apache::thrift::protocol: } -ThriftHiveMetastore_create_role_pargs::~ThriftHiveMetastore_create_role_pargs() throw() { +ThriftHiveMetastore_revoke_role_pargs::~ThriftHiveMetastore_revoke_role_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_create_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_pargs"); - xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->role)).write(oprot); + xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->role_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->principal_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24178,11 +25131,11 @@ uint32_t ThriftHiveMetastore_create_role_pargs::write(::apache::thrift::protocol } -ThriftHiveMetastore_create_role_result::~ThriftHiveMetastore_create_role_result() throw() { +ThriftHiveMetastore_revoke_role_result::~ThriftHiveMetastore_revoke_role_result() throw() { } -uint32_t ThriftHiveMetastore_create_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24230,11 +25183,11 @@ uint32_t ThriftHiveMetastore_create_role_result::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_create_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); @@ -24251,11 +25204,11 @@ uint32_t ThriftHiveMetastore_create_role_result::write(::apache::thrift::protoco } -ThriftHiveMetastore_create_role_presult::~ThriftHiveMetastore_create_role_presult() throw() { +ThriftHiveMetastore_revoke_role_presult::~ThriftHiveMetastore_revoke_role_presult() throw() { } -uint32_t ThriftHiveMetastore_create_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24304,11 +25257,11 @@ uint32_t ThriftHiveMetastore_create_role_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_drop_role_args::~ThriftHiveMetastore_drop_role_args() throw() { +ThriftHiveMetastore_list_roles_args::~ThriftHiveMetastore_list_roles_args() throw() { } -uint32_t ThriftHiveMetastore_drop_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24330,8 +25283,18 @@ uint32_t ThriftHiveMetastore_drop_role_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += 
iprot->readString(this->role_name); - this->__isset.role_name = true; + xfer += iprot->readString(this->principal_name); + this->__isset.principal_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast1189; + xfer += iprot->readI32(ecast1189); + this->principal_type = (PrincipalType::type)ecast1189; + this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); } @@ -24348,13 +25311,17 @@ uint32_t ThriftHiveMetastore_drop_role_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_drop_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_roles_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_args"); - xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->role_name); + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->principal_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24364,17 +25331,21 @@ uint32_t ThriftHiveMetastore_drop_role_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_drop_role_pargs::~ThriftHiveMetastore_drop_role_pargs() throw() { +ThriftHiveMetastore_list_roles_pargs::~ThriftHiveMetastore_list_roles_pargs() throw() { } -uint32_t ThriftHiveMetastore_drop_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_roles_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_pargs"); - xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->role_name))); + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->principal_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24384,11 +25355,11 @@ uint32_t ThriftHiveMetastore_drop_role_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_drop_role_result::~ThriftHiveMetastore_drop_role_result() throw() { +ThriftHiveMetastore_list_roles_result::~ThriftHiveMetastore_list_roles_result() throw() { } -uint32_t ThriftHiveMetastore_drop_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24409,8 +25380,20 @@ uint32_t ThriftHiveMetastore_drop_role_result::read(::apache::thrift::protocol:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == 
::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1190; + ::apache::thrift::protocol::TType _etype1193; + xfer += iprot->readListBegin(_etype1193, _size1190); + this->success.resize(_size1190); + uint32_t _i1194; + for (_i1194 = 0; _i1194 < _size1190; ++_i1194) + { + xfer += this->success[_i1194].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24436,15 +25419,23 @@ uint32_t ThriftHiveMetastore_drop_role_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_drop_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<Role> ::const_iterator _iter1195; + for (_iter1195 = this->success.begin(); _iter1195 != this->success.end(); ++_iter1195) + { + xfer += (*_iter1195).write(oprot); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -24457,11 +25448,11 @@ uint32_t ThriftHiveMetastore_drop_role_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_drop_role_presult::~ThriftHiveMetastore_drop_role_presult() throw() { +ThriftHiveMetastore_list_roles_presult::~ThriftHiveMetastore_list_roles_presult() throw() { } -uint32_t ThriftHiveMetastore_drop_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24482,8 +25473,20 @@ uint32_t ThriftHiveMetastore_drop_role_presult::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1196; + ::apache::thrift::protocol::TType _etype1199; + xfer += iprot->readListBegin(_etype1199, _size1196); + (*(this->success)).resize(_size1196); + uint32_t _i1200; + for (_i1200 = 0; _i1200 < _size1196; ++_i1200) + { + xfer += (*(this->success))[_i1200].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24510,11 +25513,11 @@ uint32_t ThriftHiveMetastore_drop_role_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_get_role_names_args::~ThriftHiveMetastore_get_role_names_args() throw() { +ThriftHiveMetastore_grant_revoke_role_args::~ThriftHiveMetastore_grant_revoke_role_args() throw() { } -uint32_t ThriftHiveMetastore_get_role_names_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24532,7 +25535,20 @@ uint32_t
ThriftHiveMetastore_get_role_names_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -24541,10 +25557,14 @@ uint32_t ThriftHiveMetastore_get_role_names_args::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_role_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_args"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -24553,14 +25573,18 @@ uint32_t ThriftHiveMetastore_get_role_names_args::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_role_names_pargs::~ThriftHiveMetastore_get_role_names_pargs() throw() { +ThriftHiveMetastore_grant_revoke_role_pargs::~ThriftHiveMetastore_grant_revoke_role_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_role_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_pargs"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -24569,11 +25593,11 @@ uint32_t ThriftHiveMetastore_get_role_names_pargs::write(::apache::thrift::proto } -ThriftHiveMetastore_get_role_names_result::~ThriftHiveMetastore_get_role_names_result() throw() { +ThriftHiveMetastore_grant_revoke_role_result::~ThriftHiveMetastore_grant_revoke_role_result() throw() { } -uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24594,20 +25618,8 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1105; - ::apache::thrift::protocol::TType _etype1108; - xfer += iprot->readListBegin(_etype1108, _size1105); - this->success.resize(_size1105); - uint32_t _i1109; - for (_i1109 = 0; _i1109 < _size1105; ++_i1109) - { - xfer += iprot->readString(this->success[_i1109]); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24633,23 +25645,15 @@ uint32_t 
ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto return xfer; } -uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1110; - for (_iter1110 = this->success.begin(); _iter1110 != this->success.end(); ++_iter1110) - { - xfer += oprot->writeString((*_iter1110)); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -24662,11 +25666,11 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot } -ThriftHiveMetastore_get_role_names_presult::~ThriftHiveMetastore_get_role_names_presult() throw() { +ThriftHiveMetastore_grant_revoke_role_presult::~ThriftHiveMetastore_grant_revoke_role_presult() throw() { } -uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24687,20 +25691,8 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1111; - ::apache::thrift::protocol::TType _etype1114; - xfer += iprot->readListBegin(_etype1114, _size1111); - (*(this->success)).resize(_size1111); - uint32_t _i1115; - for (_i1115 = 0; _i1115 < _size1111; ++_i1115) - { - xfer += iprot->readString((*(this->success))[_i1115]); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24727,11 +25719,11 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot } -ThriftHiveMetastore_grant_role_args::~ThriftHiveMetastore_grant_role_args() throw() { +ThriftHiveMetastore_get_principals_in_role_args::~ThriftHiveMetastore_get_principals_in_role_args() throw() { } -uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_principals_in_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24752,53 +25744,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->role_name); - this->__isset.role_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->principal_name); - this->__isset.principal_name = true; - } else { - xfer +=
iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1116; - xfer += iprot->readI32(ecast1116); - this->principal_type = (PrincipalType::type)ecast1116; - this->__isset.principal_type = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->grantor); - this->__isset.grantor = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 5: - if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1117; - xfer += iprot->readI32(ecast1117); - this->grantorType = (PrincipalType::type)ecast1117; - this->__isset.grantorType = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 6: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->grant_option); - this->__isset.grant_option = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; } else { xfer += iprot->skip(ftype); } @@ -24815,33 +25763,13 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_grant_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_principals_in_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_args"); - - xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->role_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->principal_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); - xfer += oprot->writeI32((int32_t)this->principal_type); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->grantor); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); - xfer += oprot->writeI32((int32_t)this->grantorType); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_args"); - xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); - xfer += oprot->writeBool(this->grant_option); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24851,37 +25779,17 @@ uint32_t ThriftHiveMetastore_grant_role_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_grant_role_pargs::~ThriftHiveMetastore_grant_role_pargs() throw() { +ThriftHiveMetastore_get_principals_in_role_pargs::~ThriftHiveMetastore_get_principals_in_role_pargs() throw() { } -uint32_t ThriftHiveMetastore_grant_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_principals_in_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_pargs"); - - xfer += 
oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->role_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->principal_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); - xfer += oprot->writeI32((int32_t)(*(this->principal_type))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString((*(this->grantor))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); - xfer += oprot->writeI32((int32_t)(*(this->grantorType))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_pargs"); - xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); - xfer += oprot->writeBool((*(this->grant_option))); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -24891,11 +25799,11 @@ uint32_t ThriftHiveMetastore_grant_role_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_grant_role_result::~ThriftHiveMetastore_grant_role_result() throw() { +ThriftHiveMetastore_get_principals_in_role_result::~ThriftHiveMetastore_get_principals_in_role_result() throw() { } -uint32_t ThriftHiveMetastore_grant_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_principals_in_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24916,8 +25824,8 @@ uint32_t ThriftHiveMetastore_grant_role_result::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -24943,15 +25851,15 @@ uint32_t ThriftHiveMetastore_grant_role_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_grant_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_principals_in_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -24964,11 +25872,11 @@ uint32_t ThriftHiveMetastore_grant_role_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_grant_role_presult::~ThriftHiveMetastore_grant_role_presult() throw() { +ThriftHiveMetastore_get_principals_in_role_presult::~ThriftHiveMetastore_get_principals_in_role_presult() throw() { } 
-uint32_t ThriftHiveMetastore_grant_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_principals_in_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -24989,8 +25897,8 @@ uint32_t ThriftHiveMetastore_grant_role_presult::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25017,11 +25925,11 @@ uint32_t ThriftHiveMetastore_grant_role_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_revoke_role_args::~ThriftHiveMetastore_revoke_role_args() throw() { +ThriftHiveMetastore_get_role_grants_for_principal_args::~ThriftHiveMetastore_get_role_grants_for_principal_args() throw() { } -uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25042,27 +25950,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->role_name); - this->__isset.role_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->principal_name); - this->__isset.principal_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1118; - xfer += iprot->readI32(ecast1118); - this->principal_type = (PrincipalType::type)ecast1118; - this->__isset.principal_type = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; } else { xfer += iprot->skip(ftype); } @@ -25079,21 +25969,13 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_args"); - - xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->role_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->principal_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_args"); - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); - xfer += oprot->writeI32((int32_t)this->principal_type); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25103,25 +25985,17 @@ uint32_t ThriftHiveMetastore_revoke_role_args::write(::apache::thrift::protocol: } 
-ThriftHiveMetastore_revoke_role_pargs::~ThriftHiveMetastore_revoke_role_pargs() throw() { +ThriftHiveMetastore_get_role_grants_for_principal_pargs::~ThriftHiveMetastore_get_role_grants_for_principal_pargs() throw() { } -uint32_t ThriftHiveMetastore_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_pargs"); - - xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->role_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->principal_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_pargs"); - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); - xfer += oprot->writeI32((int32_t)(*(this->principal_type))); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25131,11 +26005,11 @@ uint32_t ThriftHiveMetastore_revoke_role_pargs::write(::apache::thrift::protocol } -ThriftHiveMetastore_revoke_role_result::~ThriftHiveMetastore_revoke_role_result() throw() { +ThriftHiveMetastore_get_role_grants_for_principal_result::~ThriftHiveMetastore_get_role_grants_for_principal_result() throw() { } -uint32_t ThriftHiveMetastore_revoke_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25156,8 +26030,8 @@ uint32_t ThriftHiveMetastore_revoke_role_result::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25183,15 +26057,15 @@ uint32_t ThriftHiveMetastore_revoke_role_result::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_revoke_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -25204,11 +26078,11 @@ uint32_t ThriftHiveMetastore_revoke_role_result::write(::apache::thrift::protoco } -ThriftHiveMetastore_revoke_role_presult::~ThriftHiveMetastore_revoke_role_presult() throw() { 
+ThriftHiveMetastore_get_role_grants_for_principal_presult::~ThriftHiveMetastore_get_role_grants_for_principal_presult() throw() { } -uint32_t ThriftHiveMetastore_revoke_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_role_grants_for_principal_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25229,8 +26103,8 @@ uint32_t ThriftHiveMetastore_revoke_role_presult::read(::apache::thrift::protoco switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25257,11 +26131,11 @@ uint32_t ThriftHiveMetastore_revoke_role_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_list_roles_args::~ThriftHiveMetastore_list_roles_args() throw() { +ThriftHiveMetastore_get_privilege_set_args::~ThriftHiveMetastore_get_privilege_set_args() throw() { } -uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25282,19 +26156,37 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->principal_name); - this->__isset.principal_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->hiveObject.read(iprot); + this->__isset.hiveObject = true; } else { xfer += iprot->skip(ftype); } break; case 2: - if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1119; - xfer += iprot->readI32(ecast1119); - this->principal_type = (PrincipalType::type)ecast1119; - this->__isset.principal_type = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->user_name); + this->__isset.user_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->group_names.clear(); + uint32_t _size1201; + ::apache::thrift::protocol::TType _etype1204; + xfer += iprot->readListBegin(_etype1204, _size1201); + this->group_names.resize(_size1201); + uint32_t _i1205; + for (_i1205 = 0; _i1205 < _size1201; ++_i1205) + { + xfer += iprot->readString(this->group_names[_i1205]); + } + xfer += iprot->readListEnd(); + } + this->__isset.group_names = true; } else { xfer += iprot->skip(ftype); } @@ -25311,17 +26203,29 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_list_roles_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_args"); - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->principal_name); + xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->hiveObject.write(oprot); xfer += oprot->writeFieldEnd(); - xfer 
+= oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); - xfer += oprot->writeI32((int32_t)this->principal_type); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->user_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); + std::vector<std::string> ::const_iterator _iter1206; + for (_iter1206 = this->group_names.begin(); _iter1206 != this->group_names.end(); ++_iter1206) + { + xfer += oprot->writeString((*_iter1206)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25331,21 +26235,33 @@ uint32_t ThriftHiveMetastore_list_roles_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_list_roles_pargs::~ThriftHiveMetastore_list_roles_pargs() throw() { +ThriftHiveMetastore_get_privilege_set_pargs::~ThriftHiveMetastore_get_privilege_set_pargs() throw() { } -uint32_t ThriftHiveMetastore_list_roles_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_pargs"); - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->principal_name))); + xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->hiveObject)).write(oprot); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); - xfer += oprot->writeI32((int32_t)(*(this->principal_type))); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->user_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); + std::vector<std::string> ::const_iterator _iter1207; + for (_iter1207 = (*(this->group_names)).begin(); _iter1207 != (*(this->group_names)).end(); ++_iter1207) + { + xfer += oprot->writeString((*_iter1207)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25355,11 +26271,11 @@ uint32_t ThriftHiveMetastore_list_roles_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_list_roles_result::~ThriftHiveMetastore_list_roles_result() throw() { +ThriftHiveMetastore_get_privilege_set_result::~ThriftHiveMetastore_get_privilege_set_result() throw() { } -uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_privilege_set_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25380,20 +26296,8 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1120; - ::apache::thrift::protocol::TType _etype1123; - xfer +=
iprot->readListBegin(_etype1123, _size1120); - this->success.resize(_size1120); - uint32_t _i1124; - for (_i1124 = 0; _i1124 < _size1120; ++_i1124) - { - xfer += this->success[_i1124].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25419,23 +26323,15 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_privilege_set_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<Role> ::const_iterator _iter1125; - for (_iter1125 = this->success.begin(); _iter1125 != this->success.end(); ++_iter1125) - { - xfer += (*_iter1125).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -25448,11 +26344,11 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_list_roles_presult::~ThriftHiveMetastore_list_roles_presult() throw() { +ThriftHiveMetastore_get_privilege_set_presult::~ThriftHiveMetastore_get_privilege_set_presult() throw() { } -uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_privilege_set_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25473,20 +26369,8 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1126; - ::apache::thrift::protocol::TType _etype1129; - xfer += iprot->readListBegin(_etype1129, _size1126); - (*(this->success)).resize(_size1126); - uint32_t _i1130; - for (_i1130 = 0; _i1130 < _size1126; ++_i1130) - { - xfer += (*(this->success))[_i1130].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25513,11 +26397,11 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_grant_revoke_role_args::~ThriftHiveMetastore_grant_revoke_role_args() throw() { +ThriftHiveMetastore_list_privileges_args::~ThriftHiveMetastore_list_privileges_args() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25538,9 +26422,27 @@ uint32_t
ThriftHiveMetastore_grant_revoke_role_args::read(::apache::thrift::prot switch (fid) { case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->principal_name); + this->__isset.principal_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast1208; + xfer += iprot->readI32(ecast1208); + this->principal_type = (PrincipalType::type)ecast1208; + this->__isset.principal_type = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + xfer += this->hiveObject.read(iprot); + this->__isset.hiveObject = true; } else { xfer += iprot->skip(ftype); } @@ -25557,13 +26459,21 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_args::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_grant_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->principal_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)this->principal_type); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->hiveObject.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25573,17 +26483,25 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_args::write(::apache::thrift::pro } -ThriftHiveMetastore_grant_revoke_role_pargs::~ThriftHiveMetastore_grant_revoke_role_pargs() throw() { +ThriftHiveMetastore_list_privileges_pargs::~ThriftHiveMetastore_list_privileges_pargs() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->principal_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32((int32_t)(*(this->principal_type))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->hiveObject)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25593,11 +26511,11 @@ uint32_t 
ThriftHiveMetastore_grant_revoke_role_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_grant_revoke_role_result::~ThriftHiveMetastore_grant_revoke_role_result() throw() { +ThriftHiveMetastore_list_privileges_result::~ThriftHiveMetastore_list_privileges_result() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_role_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25618,8 +26536,20 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_result::read(::apache::thrift::pr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1209; + ::apache::thrift::protocol::TType _etype1212; + xfer += iprot->readListBegin(_etype1212, _size1209); + this->success.resize(_size1209); + uint32_t _i1213; + for (_i1213 = 0; _i1213 < _size1209; ++_i1213) + { + xfer += this->success[_i1213].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25645,15 +26575,23 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_result::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_grant_revoke_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<HiveObjectPrivilege> ::const_iterator _iter1214; + for (_iter1214 = this->success.begin(); _iter1214 != this->success.end(); ++_iter1214) + { + xfer += (*_iter1214).write(oprot); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -25666,11 +26604,11 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_result::write(::apache::thrift::p } -ThriftHiveMetastore_grant_revoke_role_presult::~ThriftHiveMetastore_grant_revoke_role_presult() throw() { +ThriftHiveMetastore_list_privileges_presult::~ThriftHiveMetastore_list_privileges_presult() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25691,8 +26629,20 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_presult::read(::apache::thrift::p switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1215; + ::apache::thrift::protocol::TType _etype1218; + xfer += iprot->readListBegin(_etype1218, _size1215); + (*(this->success)).resize(_size1215); + uint32_t _i1219; + for (_i1219 = 0;
_i1219 < _size1215; ++_i1219) + { + xfer += (*(this->success))[_i1219].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25719,11 +26669,11 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_presult::read(::apache::thrift::p } -ThriftHiveMetastore_get_principals_in_role_args::~ThriftHiveMetastore_get_principals_in_role_args() throw() { +ThriftHiveMetastore_grant_privileges_args::~ThriftHiveMetastore_grant_privileges_args() throw() { } -uint32_t ThriftHiveMetastore_get_principals_in_role_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25745,8 +26695,8 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_args::read(::apache::thrift: { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + xfer += this->privileges.read(iprot); + this->__isset.privileges = true; } else { xfer += iprot->skip(ftype); } @@ -25763,13 +26713,13 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_principals_in_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25779,17 +26729,17 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_args::write(::apache::thrift } -ThriftHiveMetastore_get_principals_in_role_pargs::~ThriftHiveMetastore_get_principals_in_role_pargs() throw() { +ThriftHiveMetastore_grant_privileges_pargs::~ThriftHiveMetastore_grant_privileges_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_principals_in_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->privileges)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25799,11 +26749,11 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_pargs::write(::apache::thrif } -ThriftHiveMetastore_get_principals_in_role_result::~ThriftHiveMetastore_get_principals_in_role_result() throw() { +ThriftHiveMetastore_grant_privileges_result::~ThriftHiveMetastore_grant_privileges_result() throw() { } -uint32_t ThriftHiveMetastore_get_principals_in_role_result::read(::apache::thrift::protocol::TProtocol* 
iprot) { +uint32_t ThriftHiveMetastore_grant_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25824,8 +26774,8 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_result::read(::apache::thrif switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25851,15 +26801,15 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_principals_in_role_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -25872,11 +26822,11 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_result::write(::apache::thri } -ThriftHiveMetastore_get_principals_in_role_presult::~ThriftHiveMetastore_get_principals_in_role_presult() throw() { +ThriftHiveMetastore_grant_privileges_presult::~ThriftHiveMetastore_grant_privileges_presult() throw() { } -uint32_t ThriftHiveMetastore_get_principals_in_role_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25897,8 +26847,8 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_presult::read(::apache::thri switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25925,11 +26875,11 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_presult::read(::apache::thri } -ThriftHiveMetastore_get_role_grants_for_principal_args::~ThriftHiveMetastore_get_role_grants_for_principal_args() throw() { +ThriftHiveMetastore_revoke_privileges_args::~ThriftHiveMetastore_revoke_privileges_args() throw() { } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -25951,8 +26901,8 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::read(::apache:: { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + xfer += this->privileges.read(iprot); + this->__isset.privileges = true; } else { xfer += iprot->skip(ftype); } @@ -25969,13 +26919,13 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::read(::apache:: 
return xfer; } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25985,17 +26935,17 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::write(::apache: } -ThriftHiveMetastore_get_role_grants_for_principal_pargs::~ThriftHiveMetastore_get_role_grants_for_principal_pargs() throw() { +ThriftHiveMetastore_revoke_privileges_pargs::~ThriftHiveMetastore_revoke_privileges_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->privileges)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26005,11 +26955,11 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_pargs::write(::apache } -ThriftHiveMetastore_get_role_grants_for_principal_result::~ThriftHiveMetastore_get_role_grants_for_principal_result() throw() { +ThriftHiveMetastore_revoke_privileges_result::~ThriftHiveMetastore_revoke_privileges_result() throw() { } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26030,8 +26980,8 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::read(::apache switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -26057,15 +27007,15 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::read(::apache return xfer; } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_revoke_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_result"); if (this->__isset.success) { - xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -26078,11 +27028,11 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_result::write(::apach } -ThriftHiveMetastore_get_role_grants_for_principal_presult::~ThriftHiveMetastore_get_role_grants_for_principal_presult() throw() { +ThriftHiveMetastore_revoke_privileges_presult::~ThriftHiveMetastore_revoke_privileges_presult() throw() { } -uint32_t ThriftHiveMetastore_get_role_grants_for_principal_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_revoke_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26103,8 +27053,8 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_presult::read(::apach switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -26131,11 +27081,11 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_presult::read(::apach } -ThriftHiveMetastore_get_privilege_set_args::~ThriftHiveMetastore_get_privilege_set_args() throw() { +ThriftHiveMetastore_grant_revoke_privileges_args::~ThriftHiveMetastore_grant_revoke_privileges_args() throw() { } -uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26157,36 +27107,8 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->hiveObject.read(iprot); - this->__isset.hiveObject = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->user_name); - this->__isset.user_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->group_names.clear(); - uint32_t _size1131; - ::apache::thrift::protocol::TType _etype1134; - xfer += iprot->readListBegin(_etype1134, _size1131); - this->group_names.resize(_size1131); - uint32_t _i1135; - for (_i1135 = 0; _i1135 < _size1131; ++_i1135) - { - xfer += iprot->readString(this->group_names[_i1135]); - } - xfer += iprot->readListEnd(); - } - this->__isset.group_names = true; + xfer += this->request.read(iprot); + this->__isset.request = true; } else { xfer += iprot->skip(ftype); } @@ -26203,29 +27125,13 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_args"); - - xfer += 
oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->hiveObject.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->user_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_args"); - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1136; - for (_iter1136 = this->group_names.begin(); _iter1136 != this->group_names.end(); ++_iter1136) - { - xfer += oprot->writeString((*_iter1136)); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26235,33 +27141,17 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro } -ThriftHiveMetastore_get_privilege_set_pargs::~ThriftHiveMetastore_get_privilege_set_pargs() throw() { +ThriftHiveMetastore_grant_revoke_privileges_pargs::~ThriftHiveMetastore_grant_revoke_privileges_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_pargs"); - - xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->hiveObject)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->user_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_pargs"); - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1137; - for (_iter1137 = (*(this->group_names)).begin(); _iter1137 != (*(this->group_names)).end(); ++_iter1137) - { - xfer += oprot->writeString((*_iter1137)); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26271,11 +27161,11 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_get_privilege_set_result::~ThriftHiveMetastore_get_privilege_set_result() throw() { +ThriftHiveMetastore_grant_revoke_privileges_result::~ThriftHiveMetastore_grant_revoke_privileges_result() throw() { } -uint32_t ThriftHiveMetastore_get_privilege_set_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26323,11 +27213,11 @@ uint32_t ThriftHiveMetastore_get_privilege_set_result::read(::apache::thrift::pr return xfer; } -uint32_t 
ThriftHiveMetastore_get_privilege_set_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -26344,11 +27234,11 @@ uint32_t ThriftHiveMetastore_get_privilege_set_result::write(::apache::thrift::p } -ThriftHiveMetastore_get_privilege_set_presult::~ThriftHiveMetastore_get_privilege_set_presult() throw() { +ThriftHiveMetastore_grant_revoke_privileges_presult::~ThriftHiveMetastore_grant_revoke_privileges_presult() throw() { } -uint32_t ThriftHiveMetastore_get_privilege_set_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_grant_revoke_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26397,11 +27287,11 @@ uint32_t ThriftHiveMetastore_get_privilege_set_presult::read(::apache::thrift::p } -ThriftHiveMetastore_list_privileges_args::~ThriftHiveMetastore_list_privileges_args() throw() { +ThriftHiveMetastore_set_ugi_args::~ThriftHiveMetastore_set_ugi_args() throw() { } -uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26423,26 +27313,28 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->principal_name); - this->__isset.principal_name = true; + xfer += iprot->readString(this->user_name); + this->__isset.user_name = true; } else { xfer += iprot->skip(ftype); } break; case 2: - if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1138; - xfer += iprot->readI32(ecast1138); - this->principal_type = (PrincipalType::type)ecast1138; - this->__isset.principal_type = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->hiveObject.read(iprot); - this->__isset.hiveObject = true; + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->group_names.clear(); + uint32_t _size1220; + ::apache::thrift::protocol::TType _etype1223; + xfer += iprot->readListBegin(_etype1223, _size1220); + this->group_names.resize(_size1220); + uint32_t _i1224; + for (_i1224 = 0; _i1224 < _size1220; ++_i1224) + { + xfer += iprot->readString(this->group_names[_i1224]); + } + xfer += iprot->readListEnd(); + } + this->__isset.group_names = true; } else { xfer += iprot->skip(ftype); } @@ -26459,21 +27351,25 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_list_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_args"); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += 
oprot->writeString(this->principal_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_args"); - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); - xfer += oprot->writeI32((int32_t)this->principal_type); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->hiveObject.write(oprot); + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); + std::vector ::const_iterator _iter1225; + for (_iter1225 = this->group_names.begin(); _iter1225 != this->group_names.end(); ++_iter1225) + { + xfer += oprot->writeString((*_iter1225)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26483,25 +27379,29 @@ uint32_t ThriftHiveMetastore_list_privileges_args::write(::apache::thrift::proto } -ThriftHiveMetastore_list_privileges_pargs::~ThriftHiveMetastore_list_privileges_pargs() throw() { +ThriftHiveMetastore_set_ugi_pargs::~ThriftHiveMetastore_set_ugi_pargs() throw() { } -uint32_t ThriftHiveMetastore_list_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_pargs"); - - xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->principal_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_pargs"); - xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); - xfer += oprot->writeI32((int32_t)(*(this->principal_type))); + xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += (*(this->hiveObject)).write(oprot); + xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); + std::vector ::const_iterator _iter1226; + for (_iter1226 = (*(this->group_names)).begin(); _iter1226 != (*(this->group_names)).end(); ++_iter1226) + { + xfer += oprot->writeString((*_iter1226)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26511,11 +27411,11 @@ uint32_t ThriftHiveMetastore_list_privileges_pargs::write(::apache::thrift::prot } -ThriftHiveMetastore_list_privileges_result::~ThriftHiveMetastore_list_privileges_result() throw() { +ThriftHiveMetastore_set_ugi_result::~ThriftHiveMetastore_set_ugi_result() throw() { } -uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26539,14 +27439,107 @@ uint32_t 
ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1139; - ::apache::thrift::protocol::TType _etype1142; - xfer += iprot->readListBegin(_etype1142, _size1139); - this->success.resize(_size1139); - uint32_t _i1143; - for (_i1143 = 0; _i1143 < _size1139; ++_i1143) + uint32_t _size1227; + ::apache::thrift::protocol::TType _etype1230; + xfer += iprot->readListBegin(_etype1230, _size1227); + this->success.resize(_size1227); + uint32_t _i1231; + for (_i1231 = 0; _i1231 < _size1227; ++_i1231) + { + xfer += iprot->readString(this->success[_i1231]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1232; + for (_iter1232 = this->success.begin(); _iter1232 != this->success.end(); ++_iter1232) + { + xfer += oprot->writeString((*_iter1232)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_set_ugi_presult::~ThriftHiveMetastore_set_ugi_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1233; + ::apache::thrift::protocol::TType _etype1236; + xfer += iprot->readListBegin(_etype1236, _size1233); + (*(this->success)).resize(_size1233); + uint32_t _i1237; + for (_i1237 = 0; _i1237 < _size1233; ++_i1237) { - xfer += this->success[_i1143].read(iprot); + xfer += iprot->readString((*(this->success))[_i1237]); } xfer += iprot->readListEnd(); } @@ -26575,40 +27568,12 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 
0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1144; - for (_iter1144 = this->success.begin(); _iter1144 != this->success.end(); ++_iter1144) - { - xfer += (*_iter1144).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_list_privileges_presult::~ThriftHiveMetastore_list_privileges_presult() throw() { +ThriftHiveMetastore_get_delegation_token_args::~ThriftHiveMetastore_get_delegation_token_args() throw() { } -uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26628,75 +27593,18 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1145; - ::apache::thrift::protocol::TType _etype1148; - xfer += iprot->readListBegin(_etype1148, _size1145); - (*(this->success)).resize(_size1145); - uint32_t _i1149; - for (_i1149 = 0; _i1149 < _size1145; ++_i1149) - { - xfer += (*(this->success))[_i1149].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->token_owner); + this->__isset.token_owner = true; } else { xfer += iprot->skip(ftype); } break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - -ThriftHiveMetastore_grant_privileges_args::~ThriftHiveMetastore_grant_privileges_args() throw() { -} - - -uint32_t ThriftHiveMetastore_grant_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->privileges.read(iprot); - this->__isset.privileges = true; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->renewer_kerberos_principal_name); + this->__isset.renewer_kerberos_principal_name = true; } else { xfer += iprot->skip(ftype); } @@ -26713,13 +27621,17 @@ uint32_t ThriftHiveMetastore_grant_privileges_args::read(::apache::thrift::proto return xfer; } -uint32_t ThriftHiveMetastore_grant_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer 
+= oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_args"); - xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->privileges.write(oprot); + xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->token_owner); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->renewer_kerberos_principal_name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26729,17 +27641,21 @@ uint32_t ThriftHiveMetastore_grant_privileges_args::write(::apache::thrift::prot } -ThriftHiveMetastore_grant_privileges_pargs::~ThriftHiveMetastore_grant_privileges_pargs() throw() { +ThriftHiveMetastore_get_delegation_token_pargs::~ThriftHiveMetastore_get_delegation_token_pargs() throw() { } -uint32_t ThriftHiveMetastore_grant_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_pargs"); - xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->privileges)).write(oprot); + xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->token_owner))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->renewer_kerberos_principal_name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26749,11 +27665,11 @@ uint32_t ThriftHiveMetastore_grant_privileges_pargs::write(::apache::thrift::pro } -ThriftHiveMetastore_grant_privileges_result::~ThriftHiveMetastore_grant_privileges_result() throw() { +ThriftHiveMetastore_get_delegation_token_result::~ThriftHiveMetastore_get_delegation_token_result() throw() { } -uint32_t ThriftHiveMetastore_grant_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26774,8 +27690,8 @@ uint32_t ThriftHiveMetastore_grant_privileges_result::read(::apache::thrift::pro switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -26801,15 +27717,15 @@ uint32_t ThriftHiveMetastore_grant_privileges_result::read(::apache::thrift::pro return xfer; } -uint32_t ThriftHiveMetastore_grant_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_delegation_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_result"); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0); + xfer += oprot->writeString(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -26822,11 +27738,11 @@ uint32_t ThriftHiveMetastore_grant_privileges_result::write(::apache::thrift::pr } -ThriftHiveMetastore_grant_privileges_presult::~ThriftHiveMetastore_grant_privileges_presult() throw() { +ThriftHiveMetastore_get_delegation_token_presult::~ThriftHiveMetastore_get_delegation_token_presult() throw() { } -uint32_t ThriftHiveMetastore_grant_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26847,8 +27763,8 @@ uint32_t ThriftHiveMetastore_grant_privileges_presult::read(::apache::thrift::pr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -26875,11 +27791,11 @@ uint32_t ThriftHiveMetastore_grant_privileges_presult::read(::apache::thrift::pr } -ThriftHiveMetastore_revoke_privileges_args::~ThriftHiveMetastore_revoke_privileges_args() throw() { +ThriftHiveMetastore_renew_delegation_token_args::~ThriftHiveMetastore_renew_delegation_token_args() throw() { } -uint32_t ThriftHiveMetastore_revoke_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_renew_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26900,9 +27816,9 @@ uint32_t ThriftHiveMetastore_revoke_privileges_args::read(::apache::thrift::prot switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->privileges.read(iprot); - this->__isset.privileges = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->token_str_form); + this->__isset.token_str_form = true; } else { xfer += iprot->skip(ftype); } @@ -26919,13 +27835,13 @@ uint32_t ThriftHiveMetastore_revoke_privileges_args::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_renew_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_args"); - xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->privileges.write(oprot); + xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->token_str_form); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26935,17 +27851,17 @@ uint32_t ThriftHiveMetastore_revoke_privileges_args::write(::apache::thrift::pro } 
-ThriftHiveMetastore_revoke_privileges_pargs::~ThriftHiveMetastore_revoke_privileges_pargs() throw() { +ThriftHiveMetastore_renew_delegation_token_pargs::~ThriftHiveMetastore_renew_delegation_token_pargs() throw() { } -uint32_t ThriftHiveMetastore_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_renew_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_pargs"); - xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->privileges)).write(oprot); + xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->token_str_form))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26955,11 +27871,11 @@ uint32_t ThriftHiveMetastore_revoke_privileges_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_revoke_privileges_result::~ThriftHiveMetastore_revoke_privileges_result() throw() { +ThriftHiveMetastore_renew_delegation_token_result::~ThriftHiveMetastore_renew_delegation_token_result() throw() { } -uint32_t ThriftHiveMetastore_revoke_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_renew_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -26980,8 +27896,8 @@ uint32_t ThriftHiveMetastore_revoke_privileges_result::read(::apache::thrift::pr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -27007,15 +27923,15 @@ uint32_t ThriftHiveMetastore_revoke_privileges_result::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_revoke_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_renew_delegation_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I64, 0); + xfer += oprot->writeI64(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -27028,11 +27944,11 @@ uint32_t ThriftHiveMetastore_revoke_privileges_result::write(::apache::thrift::p } -ThriftHiveMetastore_revoke_privileges_presult::~ThriftHiveMetastore_revoke_privileges_presult() throw() { +ThriftHiveMetastore_renew_delegation_token_presult::~ThriftHiveMetastore_renew_delegation_token_presult() throw() { } -uint32_t ThriftHiveMetastore_revoke_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_renew_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; 
std::string fname; @@ -27053,8 +27969,8 @@ uint32_t ThriftHiveMetastore_revoke_privileges_presult::read(::apache::thrift::p switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -27081,11 +27997,11 @@ uint32_t ThriftHiveMetastore_revoke_privileges_presult::read(::apache::thrift::p } -ThriftHiveMetastore_grant_revoke_privileges_args::~ThriftHiveMetastore_grant_revoke_privileges_args() throw() { +ThriftHiveMetastore_cancel_delegation_token_args::~ThriftHiveMetastore_cancel_delegation_token_args() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_cancel_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27106,9 +28022,9 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::read(::apache::thrift switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->token_str_form); + this->__isset.token_str_form = true; } else { xfer += iprot->skip(ftype); } @@ -27125,13 +28041,13 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::read(::apache::thrift return xfer; } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_cancel_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->token_str_form); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -27141,17 +28057,17 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::write(::apache::thrif } -ThriftHiveMetastore_grant_revoke_privileges_pargs::~ThriftHiveMetastore_grant_revoke_privileges_pargs() throw() { +ThriftHiveMetastore_cancel_delegation_token_pargs::~ThriftHiveMetastore_cancel_delegation_token_pargs() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_cancel_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->token_str_form))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -27161,11 +28077,11 @@ 
uint32_t ThriftHiveMetastore_grant_revoke_privileges_pargs::write(::apache::thri } -ThriftHiveMetastore_grant_revoke_privileges_result::~ThriftHiveMetastore_grant_revoke_privileges_result() throw() { +ThriftHiveMetastore_cancel_delegation_token_result::~ThriftHiveMetastore_cancel_delegation_token_result() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_cancel_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27185,14 +28101,6 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::read(::apache::thri } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -27213,17 +28121,13 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::read(::apache::thri return xfer; } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_cancel_delegation_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -27234,11 +28138,11 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_result::write(::apache::thr } -ThriftHiveMetastore_grant_revoke_privileges_presult::~ThriftHiveMetastore_grant_revoke_privileges_presult() throw() { +ThriftHiveMetastore_cancel_delegation_token_presult::~ThriftHiveMetastore_cancel_delegation_token_presult() throw() { } -uint32_t ThriftHiveMetastore_grant_revoke_privileges_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_cancel_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27258,14 +28162,6 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_presult::read(::apache::thr } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -27287,11 +28183,11 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_presult::read(::apache::thr } -ThriftHiveMetastore_set_ugi_args::~ThriftHiveMetastore_set_ugi_args() throw() { +ThriftHiveMetastore_get_open_txns_args::~ThriftHiveMetastore_get_open_txns_args() throw() { } -uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27309,40 +28205,7 @@ uint32_t 
ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->user_name); - this->__isset.user_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->group_names.clear(); - uint32_t _size1150; - ::apache::thrift::protocol::TType _etype1153; - xfer += iprot->readListBegin(_etype1153, _size1150); - this->group_names.resize(_size1150); - uint32_t _i1154; - for (_i1154 = 0; _i1154 < _size1150; ++_i1154) - { - xfer += iprot->readString(this->group_names[_i1154]); - } - xfer += iprot->readListEnd(); - } - this->__isset.group_names = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -27351,26 +28214,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro return xfer; } -uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_args"); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->user_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1155; - for (_iter1155 = this->group_names.begin(); _iter1155 != this->group_names.end(); ++_iter1155) - { - xfer += oprot->writeString((*_iter1155)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -27379,30 +28226,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_set_ugi_pargs::~ThriftHiveMetastore_set_ugi_pargs() throw() { +ThriftHiveMetastore_get_open_txns_pargs::~ThriftHiveMetastore_get_open_txns_pargs() throw() { } -uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_pargs"); - - xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->user_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1156; - for (_iter1156 = (*(this->group_names)).begin(); _iter1156 != (*(this->group_names)).end(); ++_iter1156) - { - xfer += oprot->writeString((*_iter1156)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -27411,11 +28242,11 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_set_ugi_result::~ThriftHiveMetastore_set_ugi_result() throw() { +ThriftHiveMetastore_get_open_txns_result::~ThriftHiveMetastore_get_open_txns_result() throw() { } -uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27436,29 +28267,9 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1157; - ::apache::thrift::protocol::TType _etype1160; - xfer += iprot->readListBegin(_etype1160, _size1157); - this->success.resize(_size1157); - uint32_t _i1161; - for (_i1161 = 0; _i1161 < _size1157; ++_i1161) - { - xfer += iprot->readString(this->success[_i1161]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27475,27 +28286,15 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1162; - for (_iter1162 = this->success.begin(); _iter1162 != this->success.end(); ++_iter1162) - { - xfer += oprot->writeString((*_iter1162)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -27504,11 +28303,11 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_set_ugi_presult::~ThriftHiveMetastore_set_ugi_presult() throw() { +ThriftHiveMetastore_get_open_txns_presult::~ThriftHiveMetastore_get_open_txns_presult() throw() { } -uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27529,29 +28328,9 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T switch (fid) { case 0: - if (ftype == 
::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1163; - ::apache::thrift::protocol::TType _etype1166; - xfer += iprot->readListBegin(_etype1166, _size1163); - (*(this->success)).resize(_size1163); - uint32_t _i1167; - for (_i1167 = 0; _i1167 < _size1163; ++_i1167) - { - xfer += iprot->readString((*(this->success))[_i1167]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27569,11 +28348,11 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T } -ThriftHiveMetastore_get_delegation_token_args::~ThriftHiveMetastore_get_delegation_token_args() throw() { +ThriftHiveMetastore_get_open_txns_info_args::~ThriftHiveMetastore_get_open_txns_info_args() throw() { } -uint32_t ThriftHiveMetastore_get_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27591,28 +28370,7 @@ uint32_t ThriftHiveMetastore_get_delegation_token_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->token_owner); - this->__isset.token_owner = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->renewer_kerberos_principal_name); - this->__isset.renewer_kerberos_principal_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -27621,18 +28379,10 @@ uint32_t ThriftHiveMetastore_get_delegation_token_args::read(::apache::thrift::p return xfer; } -uint32_t ThriftHiveMetastore_get_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_args"); - - xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->token_owner); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->renewer_kerberos_principal_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -27641,22 +28391,14 @@ uint32_t ThriftHiveMetastore_get_delegation_token_args::write(::apache::thrift:: } -ThriftHiveMetastore_get_delegation_token_pargs::~ThriftHiveMetastore_get_delegation_token_pargs() throw() { +ThriftHiveMetastore_get_open_txns_info_pargs::~ThriftHiveMetastore_get_open_txns_info_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t 
ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_pargs"); - - xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->token_owner))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->renewer_kerberos_principal_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -27665,11 +28407,11 @@ uint32_t ThriftHiveMetastore_get_delegation_token_pargs::write(::apache::thrift: } -ThriftHiveMetastore_get_delegation_token_result::~ThriftHiveMetastore_get_delegation_token_result() throw() { +ThriftHiveMetastore_get_open_txns_info_result::~ThriftHiveMetastore_get_open_txns_info_result() throw() { } -uint32_t ThriftHiveMetastore_get_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27690,17 +28432,9 @@ uint32_t ThriftHiveMetastore_get_delegation_token_result::read(::apache::thrift: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->success); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27717,19 +28451,15 @@ uint32_t ThriftHiveMetastore_get_delegation_token_result::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_delegation_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0); - xfer += oprot->writeString(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -27738,11 +28468,11 @@ uint32_t ThriftHiveMetastore_get_delegation_token_result::write(::apache::thrift } -ThriftHiveMetastore_get_delegation_token_presult::~ThriftHiveMetastore_get_delegation_token_presult() throw() { +ThriftHiveMetastore_get_open_txns_info_presult::~ThriftHiveMetastore_get_open_txns_info_presult() throw() { } -uint32_t ThriftHiveMetastore_get_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::protocol::TProtocol* 
iprot) { uint32_t xfer = 0; std::string fname; @@ -27763,17 +28493,9 @@ uint32_t ThriftHiveMetastore_get_delegation_token_presult::read(::apache::thrift switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27791,11 +28513,11 @@ uint32_t ThriftHiveMetastore_get_delegation_token_presult::read(::apache::thrift } -ThriftHiveMetastore_renew_delegation_token_args::~ThriftHiveMetastore_renew_delegation_token_args() throw() { +ThriftHiveMetastore_open_txns_args::~ThriftHiveMetastore_open_txns_args() throw() { } -uint32_t ThriftHiveMetastore_renew_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27816,9 +28538,9 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_args::read(::apache::thrift: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->token_str_form); - this->__isset.token_str_form = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -27835,13 +28557,13 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_renew_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_args"); - xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->token_str_form); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -27851,17 +28573,17 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_args::write(::apache::thrift } -ThriftHiveMetastore_renew_delegation_token_pargs::~ThriftHiveMetastore_renew_delegation_token_pargs() throw() { +ThriftHiveMetastore_open_txns_pargs::~ThriftHiveMetastore_open_txns_pargs() throw() { } -uint32_t ThriftHiveMetastore_renew_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_pargs"); - xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->token_str_form))); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += 
oprot->writeFieldStop(); @@ -27871,11 +28593,11 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_pargs::write(::apache::thrif } -ThriftHiveMetastore_renew_delegation_token_result::~ThriftHiveMetastore_renew_delegation_token_result() throw() { +ThriftHiveMetastore_open_txns_result::~ThriftHiveMetastore_open_txns_result() throw() { } -uint32_t ThriftHiveMetastore_renew_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27896,17 +28618,9 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_result::read(::apache::thrif switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->success); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27923,19 +28637,15 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_renew_delegation_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I64, 0); - xfer += oprot->writeI64(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -27944,11 +28654,11 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_result::write(::apache::thri } -ThriftHiveMetastore_renew_delegation_token_presult::~ThriftHiveMetastore_renew_delegation_token_presult() throw() { +ThriftHiveMetastore_open_txns_presult::~ThriftHiveMetastore_open_txns_presult() throw() { } -uint32_t ThriftHiveMetastore_renew_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -27969,17 +28679,9 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_presult::read(::apache::thri switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -27997,11 +28699,11 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_presult::read(::apache::thri } -ThriftHiveMetastore_cancel_delegation_token_args::~ThriftHiveMetastore_cancel_delegation_token_args() throw() { 
+ThriftHiveMetastore_abort_txn_args::~ThriftHiveMetastore_abort_txn_args() throw() { } -uint32_t ThriftHiveMetastore_cancel_delegation_token_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28022,9 +28724,9 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_args::read(::apache::thrift switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->token_str_form); - this->__isset.token_str_form = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -28041,13 +28743,13 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_args::read(::apache::thrift return xfer; } -uint32_t ThriftHiveMetastore_cancel_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_args"); - xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->token_str_form); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -28057,17 +28759,17 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_args::write(::apache::thrif } -ThriftHiveMetastore_cancel_delegation_token_pargs::~ThriftHiveMetastore_cancel_delegation_token_pargs() throw() { +ThriftHiveMetastore_abort_txn_pargs::~ThriftHiveMetastore_abort_txn_pargs() throw() { } -uint32_t ThriftHiveMetastore_cancel_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_pargs"); - xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->token_str_form))); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -28077,11 +28779,11 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_pargs::write(::apache::thri } -ThriftHiveMetastore_cancel_delegation_token_result::~ThriftHiveMetastore_cancel_delegation_token_result() throw() { +ThriftHiveMetastore_abort_txn_result::~ThriftHiveMetastore_abort_txn_result() throw() { } -uint32_t ThriftHiveMetastore_cancel_delegation_token_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28121,11 +28823,11 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_result::read(::apache::thri return xfer; } -uint32_t ThriftHiveMetastore_cancel_delegation_token_result::write(::apache::thrift::protocol::TProtocol* 
oprot) const { +uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -28138,11 +28840,11 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_result::write(::apache::thr } -ThriftHiveMetastore_cancel_delegation_token_presult::~ThriftHiveMetastore_cancel_delegation_token_presult() throw() { +ThriftHiveMetastore_abort_txn_presult::~ThriftHiveMetastore_abort_txn_presult() throw() { } -uint32_t ThriftHiveMetastore_cancel_delegation_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28183,11 +28885,11 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_presult::read(::apache::thr } -ThriftHiveMetastore_get_open_txns_args::~ThriftHiveMetastore_get_open_txns_args() throw() { +ThriftHiveMetastore_commit_txn_args::~ThriftHiveMetastore_commit_txn_args() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28205,7 +28907,20 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -28214,10 +28929,14 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28226,14 +28945,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_open_txns_pargs::~ThriftHiveMetastore_get_open_txns_pargs() throw() { +ThriftHiveMetastore_commit_txn_pargs::~ThriftHiveMetastore_commit_txn_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_pargs"); + + xfer += oprot->writeFieldBegin("rqst", 
::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28242,11 +28965,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_open_txns_result::~ThriftHiveMetastore_get_open_txns_result() throw() { +ThriftHiveMetastore_commit_txn_result::~ThriftHiveMetastore_commit_txn_result() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28266,10 +28989,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protoc } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -28286,15 +29017,19 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -28303,11 +29038,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::proto } -ThriftHiveMetastore_get_open_txns_presult::~ThriftHiveMetastore_get_open_txns_presult() throw() { +ThriftHiveMetastore_commit_txn_presult::~ThriftHiveMetastore_commit_txn_presult() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28327,10 +29062,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::proto } switch (fid) { - case 0: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -28348,11 +29091,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::proto } 
-ThriftHiveMetastore_get_open_txns_info_args::~ThriftHiveMetastore_get_open_txns_info_args() throw() { +ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28370,7 +29113,20 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -28379,10 +29135,14 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::pro return xfer; } -uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28391,14 +29151,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::pr } -ThriftHiveMetastore_get_open_txns_info_pargs::~ThriftHiveMetastore_get_open_txns_info_pargs() throw() { +ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); + + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28407,11 +29171,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::p } -ThriftHiveMetastore_get_open_txns_info_result::~ThriftHiveMetastore_get_open_txns_info_result() throw() { +ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28439,6 +29203,22 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::p xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += 
this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28451,16 +29231,24 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::p return xfer; } -uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28468,11 +29256,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift:: } -ThriftHiveMetastore_get_open_txns_info_presult::~ThriftHiveMetastore_get_open_txns_info_presult() throw() { +ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28500,6 +29288,22 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift:: xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28513,11 +29317,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift:: } -ThriftHiveMetastore_open_txns_args::~ThriftHiveMetastore_open_txns_args() throw() { +ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { } -uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28557,10 +29361,10 @@ uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -28573,14 +29377,14 
@@ uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_open_txns_pargs::~ThriftHiveMetastore_open_txns_pargs() throw() { +ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { } -uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -28593,11 +29397,11 @@ uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_open_txns_result::~ThriftHiveMetastore_open_txns_result() throw() { +ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { } -uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28625,6 +29429,30 @@ uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28637,16 +29465,28 @@ uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28654,11 +29494,11 @@ uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_open_txns_presult::~ThriftHiveMetastore_open_txns_presult() throw() { 
+ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { } -uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28686,6 +29526,30 @@ uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28699,11 +29563,11 @@ uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_abort_txn_args::~ThriftHiveMetastore_abort_txn_args() throw() { +ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { } -uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28743,10 +29607,10 @@ uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -28759,14 +29623,14 @@ uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_abort_txn_pargs::~ThriftHiveMetastore_abort_txn_pargs() throw() { +ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { } -uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -28779,11 +29643,11 @@ uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_abort_txn_result::~ThriftHiveMetastore_abort_txn_result() throw() { +ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { } -uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28811,6 
+29675,14 @@ uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28823,16 +29695,20 @@ uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -28840,11 +29716,11 @@ uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_abort_txn_presult::~ThriftHiveMetastore_abort_txn_presult() throw() { +ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() { } -uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28872,6 +29748,14 @@ uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -28885,11 +29769,11 @@ uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_commit_txn_args::~ThriftHiveMetastore_commit_txn_args() throw() { +ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { } -uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28929,10 +29813,10 @@ uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -28945,14 +29829,14 @@ uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_commit_txn_pargs::~ThriftHiveMetastore_commit_txn_pargs() throw() { 
+ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { } -uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -28965,11 +29849,11 @@ uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_commit_txn_result::~ThriftHiveMetastore_commit_txn_result() throw() { +ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { } -uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -28989,18 +29873,10 @@ uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol: } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -29017,19 +29893,15 @@ uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -29038,11 +29910,11 @@ uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_commit_txn_presult::~ThriftHiveMetastore_commit_txn_presult() throw() { +ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { } -uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29062,18 +29934,10 @@ uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } 
else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -29091,11 +29955,11 @@ uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() { +ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { } -uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29117,8 +29981,8 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->ids.read(iprot); + this->__isset.ids = true; } else { xfer += iprot->skip(ftype); } @@ -29135,13 +29999,13 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc return xfer; } -uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ids.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -29151,17 +30015,17 @@ uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProto } -ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() { +ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { } -uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->ids)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -29171,11 +30035,11 @@ uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProt } -ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() { +ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { } -uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29195,14 +30059,6 @@ uint32_t 
ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -29219,6 +30075,14 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -29231,17 +30095,13 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -29249,6 +30109,10 @@ uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TPro xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -29256,11 +30120,11 @@ uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() { +ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { } -uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29280,14 +30144,6 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -29304,6 +30160,14 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -29317,11 +30181,11 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro } 
-ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() { +ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { } -uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29343,8 +30207,8 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->txns.read(iprot); + this->__isset.txns = true; } else { xfer += iprot->skip(ftype); } @@ -29361,13 +30225,13 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->txns.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -29377,17 +30241,17 @@ uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() { +ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { } -uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->txns)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -29397,11 +30261,11 @@ uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() { +ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { } -uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29429,30 +30293,6 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); 
- } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -29465,28 +30305,16 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -29494,11 +30322,11 @@ uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() { +ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { } -uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29526,30 +30354,6 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -29563,11 +30367,11 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() { +ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { } -uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29607,10 +30411,10 @@ 
uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt return xfer; } -uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -29623,14 +30427,14 @@ uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TPro } -ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() { +ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { } -uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -29643,11 +30447,11 @@ uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() { +ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { } -uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29665,28 +30469,7 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -29695,32 +30478,23 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } 
-ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() { +ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { } -uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29738,28 +30512,7 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -29769,11 +30522,11 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP } -ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() { +ThriftHiveMetastore_show_compact_args::~ThriftHiveMetastore_show_compact_args() throw() { } -uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29813,10 +30566,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -29829,14 +30582,14 @@ uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() { +ThriftHiveMetastore_show_compact_pargs::~ThriftHiveMetastore_show_compact_pargs() throw() { } -uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -29849,11 +30602,11 @@ uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() { +ThriftHiveMetastore_show_compact_result::~ThriftHiveMetastore_show_compact_result() throw() { } -uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29893,11 +30646,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_show_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -29910,11 +30663,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() { +ThriftHiveMetastore_show_compact_presult::~ThriftHiveMetastore_show_compact_presult() throw() { } -uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29955,11 +30708,11 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() { +ThriftHiveMetastore_add_dynamic_partitions_args::~ThriftHiveMetastore_add_dynamic_partitions_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -29981,8 +30734,8 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ids.read(iprot); - this->__isset.ids = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -29999,13 +30752,13 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_args"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->ids.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30015,17 +30768,17 @@ uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() { +ThriftHiveMetastore_add_dynamic_partitions_pargs::~ThriftHiveMetastore_add_dynamic_partitions_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t 
ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_pargs"); - xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->ids)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30035,11 +30788,11 @@ uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() { +ThriftHiveMetastore_add_dynamic_partitions_result::~ThriftHiveMetastore_add_dynamic_partitions_result() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30075,14 +30828,6 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -30095,11 +30840,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -30109,10 +30854,6 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -30120,11 +30861,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() { +ThriftHiveMetastore_add_dynamic_partitions_presult::~ThriftHiveMetastore_add_dynamic_partitions_presult() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30160,14 +30901,6 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - 
xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -30181,11 +30914,11 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() { +ThriftHiveMetastore_get_next_notification_args::~ThriftHiveMetastore_get_next_notification_args() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30207,8 +30940,8 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->txns.read(iprot); - this->__isset.txns = true; + xfer += this->rqst.read(iprot); + this->__isset.rqst = true; } else { xfer += iprot->skip(ftype); } @@ -30225,13 +30958,13 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_args"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->txns.write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30241,17 +30974,17 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::p } -ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() { +ThriftHiveMetastore_get_next_notification_pargs::~ThriftHiveMetastore_get_next_notification_pargs() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_pargs"); - xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->txns)).write(oprot); + xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30261,11 +30994,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift:: } -ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() { +ThriftHiveMetastore_get_next_notification_result::~ThriftHiveMetastore_get_next_notification_result() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; 
std::string fname; @@ -30305,11 +31038,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -30322,11 +31055,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift: } -ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() { +ThriftHiveMetastore_get_next_notification_presult::~ThriftHiveMetastore_get_next_notification_presult() throw() { } -uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30367,11 +31100,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift: } -ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() { +ThriftHiveMetastore_get_current_notificationEventId_args::~ThriftHiveMetastore_get_current_notificationEventId_args() throw() { } -uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30389,20 +31122,7 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -30411,14 +31131,10 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro return xfer; } -uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args"); - - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -30427,18 +31143,14 @@ uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TPr } -ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() { +ThriftHiveMetastore_get_current_notificationEventId_pargs::~ThriftHiveMetastore_get_current_notificationEventId_pargs() throw() { } -uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* 
oprot) const { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs"); - - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -30447,11 +31159,11 @@ uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() { +ThriftHiveMetastore_get_current_notificationEventId_result::~ThriftHiveMetastore_get_current_notificationEventId_result() throw() { } -uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30469,7 +31181,20 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -30478,23 +31203,28 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_result"); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() { +ThriftHiveMetastore_get_current_notificationEventId_presult::~ThriftHiveMetastore_get_current_notificationEventId_presult() throw() { } -uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30512,7 +31242,20 @@ uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -30522,11 +31265,11 @@ uint32_t 
ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T } -ThriftHiveMetastore_show_compact_args::~ThriftHiveMetastore_show_compact_args() throw() { +ThriftHiveMetastore_fire_listener_event_args::~ThriftHiveMetastore_fire_listener_event_args() throw() { } -uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_fire_listener_event_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30566,10 +31309,10 @@ uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_fire_listener_event_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_args"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); @@ -30582,14 +31325,14 @@ uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_show_compact_pargs::~ThriftHiveMetastore_show_compact_pargs() throw() { +ThriftHiveMetastore_fire_listener_event_pargs::~ThriftHiveMetastore_fire_listener_event_pargs() throw() { } -uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_fire_listener_event_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_pargs"); xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); @@ -30602,11 +31345,11 @@ uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_show_compact_result::~ThriftHiveMetastore_show_compact_result() throw() { +ThriftHiveMetastore_fire_listener_event_result::~ThriftHiveMetastore_fire_listener_event_result() throw() { } -uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_fire_listener_event_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30646,11 +31389,11 @@ uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_show_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_fire_listener_event_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -30663,11 +31406,11 @@ uint32_t ThriftHiveMetastore_show_compact_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_show_compact_presult::~ThriftHiveMetastore_show_compact_presult() throw() { 
+ThriftHiveMetastore_fire_listener_event_presult::~ThriftHiveMetastore_fire_listener_event_presult() throw() { } -uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_fire_listener_event_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30708,11 +31451,145 @@ uint32_t ThriftHiveMetastore_show_compact_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_add_dynamic_partitions_args::~ThriftHiveMetastore_add_dynamic_partitions_args() throw() { +ThriftHiveMetastore_flushCache_args::~ThriftHiveMetastore_flushCache_args() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_flushCache_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_flushCache_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_flushCache_pargs::~ThriftHiveMetastore_flushCache_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_flushCache_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + + +ThriftHiveMetastore_flushCache_result::~ThriftHiveMetastore_flushCache_result() throw() { +} + + +uint32_t ThriftHiveMetastore_flushCache_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_flushCache_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_result"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_flushCache_presult::~ThriftHiveMetastore_flushCache_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_flushCache_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using 
::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_file_metadata_by_expr_args::~ThriftHiveMetastore_get_file_metadata_by_expr_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30734,8 +31611,8 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift: { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->req.read(iprot); + this->__isset.req = true; } else { xfer += iprot->skip(ftype); } @@ -30752,13 +31629,13 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_by_expr_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30768,17 +31645,17 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift } -ThriftHiveMetastore_add_dynamic_partitions_pargs::~ThriftHiveMetastore_add_dynamic_partitions_pargs() throw() { +ThriftHiveMetastore_get_file_metadata_by_expr_pargs::~ThriftHiveMetastore_get_file_metadata_by_expr_pargs() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_by_expr_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30788,11 +31665,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrif } -ThriftHiveMetastore_add_dynamic_partitions_result::~ThriftHiveMetastore_add_dynamic_partitions_result() throw() { +ThriftHiveMetastore_get_file_metadata_by_expr_result::~ThriftHiveMetastore_get_file_metadata_by_expr_result() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; 
@@ -30812,18 +31689,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrif } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -30840,19 +31709,15 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_by_expr_result"); - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -30861,11 +31726,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_result::write(::apache::thri } -ThriftHiveMetastore_add_dynamic_partitions_presult::~ThriftHiveMetastore_add_dynamic_partitions_presult() throw() { +ThriftHiveMetastore_get_file_metadata_by_expr_presult::~ThriftHiveMetastore_get_file_metadata_by_expr_presult() throw() { } -uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_file_metadata_by_expr_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30885,18 +31750,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thri } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } @@ -30914,11 +31771,11 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_presult::read(::apache::thri } -ThriftHiveMetastore_get_next_notification_args::~ThriftHiveMetastore_get_next_notification_args() throw() { +ThriftHiveMetastore_get_file_metadata_args::~ThriftHiveMetastore_get_file_metadata_args() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_file_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -30940,8 +31797,8 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift:: { case 1: if (ftype == 
::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->req.read(iprot); + this->__isset.req = true; } else { xfer += iprot->skip(ftype); } @@ -30958,13 +31815,13 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30974,17 +31831,17 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift: } -ThriftHiveMetastore_get_next_notification_pargs::~ThriftHiveMetastore_get_next_notification_pargs() throw() { +ThriftHiveMetastore_get_file_metadata_pargs::~ThriftHiveMetastore_get_file_metadata_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -30994,11 +31851,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift } -ThriftHiveMetastore_get_next_notification_result::~ThriftHiveMetastore_get_next_notification_result() throw() { +ThriftHiveMetastore_get_file_metadata_result::~ThriftHiveMetastore_get_file_metadata_result() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_file_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31038,11 +31895,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_result::read(::apache::thrift return xfer; } -uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_file_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_file_metadata_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -31055,11 +31912,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_result::write(::apache::thrif } 
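The generated deserializers in these hunks all share the same field loop: read a field header, dispatch on the numeric field id, and skip anything unrecognized so older readers stay compatible with newer writers. A minimal sketch of that skeleton, not part of this patch and using placeholder names, looks like this:

#include <cstdint>
#include <string>
#include <thrift/protocol/TProtocol.h>

// Sketch only: mirrors the read() pattern in the generated code above.
// "example_read" and the field id 1 are placeholders, not Hive symbols.
uint32_t example_read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;

  xfer += iprot->readStructBegin(fname);
  while (true) {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;                                  // end of struct on the wire
    }
    switch (fid) {
      case 1:
        // a known field would be read into the struct here and __isset marked;
        // this sketch just skips it
        xfer += iprot->skip(ftype);
        break;
      default:
        xfer += iprot->skip(ftype);           // unknown field: skip for forward compatibility
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;                                // total bytes consumed
}

The write() side mirrors this with writeStructBegin/writeFieldBegin/writeFieldStop/writeStructEnd, which is why even the argument-less flushCache call still serializes an empty args struct ending in a field stop.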
-ThriftHiveMetastore_get_next_notification_presult::~ThriftHiveMetastore_get_next_notification_presult() throw() { +ThriftHiveMetastore_get_file_metadata_presult::~ThriftHiveMetastore_get_file_metadata_presult() throw() { } -uint32_t ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_file_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31100,11 +31957,11 @@ uint32_t ThriftHiveMetastore_get_next_notification_presult::read(::apache::thrif } -ThriftHiveMetastore_get_current_notificationEventId_args::~ThriftHiveMetastore_get_current_notificationEventId_args() throw() { +ThriftHiveMetastore_put_file_metadata_args::~ThriftHiveMetastore_put_file_metadata_args() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_put_file_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31122,7 +31979,20 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -31131,10 +32001,14 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache return xfer; } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_put_file_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_put_file_metadata_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -31143,14 +32017,18 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apach } -ThriftHiveMetastore_get_current_notificationEventId_pargs::~ThriftHiveMetastore_get_current_notificationEventId_pargs() throw() { +ThriftHiveMetastore_put_file_metadata_pargs::~ThriftHiveMetastore_put_file_metadata_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_put_file_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_put_file_metadata_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -31159,11 +32037,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apac } 
-ThriftHiveMetastore_get_current_notificationEventId_result::~ThriftHiveMetastore_get_current_notificationEventId_result() throw() { +ThriftHiveMetastore_put_file_metadata_result::~ThriftHiveMetastore_put_file_metadata_result() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_put_file_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31203,11 +32081,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::read(::apac return xfer; } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_put_file_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_put_file_metadata_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -31220,11 +32098,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_result::write(::apa } -ThriftHiveMetastore_get_current_notificationEventId_presult::~ThriftHiveMetastore_get_current_notificationEventId_presult() throw() { +ThriftHiveMetastore_put_file_metadata_presult::~ThriftHiveMetastore_put_file_metadata_presult() throw() { } -uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_put_file_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31265,11 +32143,11 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_presult::read(::apa } -ThriftHiveMetastore_fire_listener_event_args::~ThriftHiveMetastore_fire_listener_event_args() throw() { +ThriftHiveMetastore_clear_file_metadata_args::~ThriftHiveMetastore_clear_file_metadata_args() throw() { } -uint32_t ThriftHiveMetastore_fire_listener_event_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_clear_file_metadata_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31291,8 +32169,8 @@ uint32_t ThriftHiveMetastore_fire_listener_event_args::read(::apache::thrift::pr { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rqst.read(iprot); - this->__isset.rqst = true; + xfer += this->req.read(iprot); + this->__isset.req = true; } else { xfer += iprot->skip(ftype); } @@ -31309,13 +32187,13 @@ uint32_t ThriftHiveMetastore_fire_listener_event_args::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_fire_listener_event_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_clear_file_metadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_clear_file_metadata_args"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rqst.write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); xfer += 
oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -31325,17 +32203,17 @@ uint32_t ThriftHiveMetastore_fire_listener_event_args::write(::apache::thrift::p } -ThriftHiveMetastore_fire_listener_event_pargs::~ThriftHiveMetastore_fire_listener_event_pargs() throw() { +ThriftHiveMetastore_clear_file_metadata_pargs::~ThriftHiveMetastore_clear_file_metadata_pargs() throw() { } -uint32_t ThriftHiveMetastore_fire_listener_event_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_clear_file_metadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; oprot->incrementRecursionDepth(); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_clear_file_metadata_pargs"); - xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rqst)).write(oprot); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -31345,11 +32223,11 @@ uint32_t ThriftHiveMetastore_fire_listener_event_pargs::write(::apache::thrift:: } -ThriftHiveMetastore_fire_listener_event_result::~ThriftHiveMetastore_fire_listener_event_result() throw() { +ThriftHiveMetastore_clear_file_metadata_result::~ThriftHiveMetastore_clear_file_metadata_result() throw() { } -uint32_t ThriftHiveMetastore_fire_listener_event_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_clear_file_metadata_result::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -31389,11 +32267,11 @@ uint32_t ThriftHiveMetastore_fire_listener_event_result::read(::apache::thrift:: return xfer; } -uint32_t ThriftHiveMetastore_fire_listener_event_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_clear_file_metadata_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_clear_file_metadata_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -31406,11 +32284,11 @@ uint32_t ThriftHiveMetastore_fire_listener_event_result::write(::apache::thrift: } -ThriftHiveMetastore_fire_listener_event_presult::~ThriftHiveMetastore_fire_listener_event_presult() throw() { +ThriftHiveMetastore_clear_file_metadata_presult::~ThriftHiveMetastore_clear_file_metadata_presult() throw() { } -uint32_t ThriftHiveMetastore_fire_listener_event_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_clear_file_metadata_presult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -39391,6 +40269,290 @@ void ThriftHiveMetastoreClient::recv_fire_listener_event(FireEventResponse& _ret throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); } +void ThriftHiveMetastoreClient::flushCache() +{ + send_flushCache(); + recv_flushCache(); +} + +void ThriftHiveMetastoreClient::send_flushCache() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_flushCache_pargs args; 
+ args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_flushCache() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("flushCache") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_flushCache_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + return; +} + +void ThriftHiveMetastoreClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +{ + send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return); +} + +void ThriftHiveMetastoreClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_file_metadata_by_expr") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) +{ + send_get_file_metadata(req); + recv_get_file_metadata(_return); +} + +void ThriftHiveMetastoreClient::send_get_file_metadata(const GetFileMetadataRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + 
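The client stubs above (flushCache, get_file_metadata_by_expr, get_file_metadata, and the put/clear variants that follow) all use the usual Thrift send/recv pairing over the client's protocols. As a hedged usage sketch, not part of this patch: the host/port, the Apache::Hadoop::Hive namespace, and the request field name fileIds are assumptions about the generated types rather than something these hunks show.

#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::Apache::Hadoop::Hive;      // assumed C++ namespace of the generated service

int main() {
  // Assumed metastore endpoint; 9083 is the conventional metastore Thrift port.
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  client.flushCache();                        // new no-arg, no-result call

  GetFileMetadataRequest req;
  req.fileIds.push_back(42);                  // field name assumed from the Thrift IDL
  GetFileMetadataResult res;
  client.get_file_metadata(res, req);         // result is filled in by recv_get_file_metadata

  transport->close();
  return 0;
}

The same shape applies to get_file_metadata_by_expr, put_file_metadata, and clear_file_metadata: each takes a request struct by const reference and fills a result struct passed as the first parameter.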
+void ThriftHiveMetastoreClient::recv_get_file_metadata(GetFileMetadataResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); +} + +void ThriftHiveMetastoreClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +{ + send_put_file_metadata(req); + recv_put_file_metadata(_return); +} + +void ThriftHiveMetastoreClient::send_put_file_metadata(const PutFileMetadataRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_put_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_put_file_metadata(PutFileMetadataResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("put_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_put_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); +} + +void ThriftHiveMetastoreClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +{ + send_clear_file_metadata(req); + recv_clear_file_metadata(_return); +} + +void ThriftHiveMetastoreClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_clear_file_metadata_pargs args; + args.req = &req; + args.write(oprot_); + + 
oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_clear_file_metadata(ClearFileMetadataResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("clear_file_metadata") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_clear_file_metadata_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); +} + bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { ProcessMap::iterator pfn; pfn = processMap_.find(fname); @@ -46805,6 +47967,275 @@ void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, :: } } +void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache"); + } + + ThriftHiveMetastore_flushCache_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes); + } + + ThriftHiveMetastore_flushCache_result result; + try { + iface_->flushCache(); + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache"); + } + + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes); + } +} + +void 
ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata_by_expr", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + ThriftHiveMetastore_get_file_metadata_by_expr_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + } + + ThriftHiveMetastore_get_file_metadata_by_expr_result result; + try { + iface_->get_file_metadata_by_expr(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + } + + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ThriftHiveMetastore_get_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } + + ThriftHiveMetastore_get_file_metadata_result result; + try { + iface_->get_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + 
oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); + } + + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ThriftHiveMetastore_put_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } + + ThriftHiveMetastore_put_file_metadata_result result; + try { + iface_->put_file_metadata(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); + } + + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + ThriftHiveMetastore_clear_file_metadata_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } + + ThriftHiveMetastore_clear_file_metadata_result result; + try { + iface_->clear_file_metadata(result.success, 
args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); + } + + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); + } +} + ::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 520c6e3..49d31e6 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -140,6 +140,11 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) = 0; virtual void get_current_notificationEventId(CurrentNotificationEventId& _return) = 0; virtual void fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) = 0; + virtual void flushCache() = 0; + virtual void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) = 0; + virtual void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) = 0; + virtual void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) = 0; + virtual void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -564,6 +569,21 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void fire_listener_event(FireEventResponse& /* _return */, const FireEventRequest& /* rqst */) { return; } + void flushCache() { + return; + } + void get_file_metadata_by_expr(GetFileMetadataByExprResult& /* _return */, const GetFileMetadataByExprRequest& /* req */) { + return; + } + void get_file_metadata(GetFileMetadataResult& /* _return */, const GetFileMetadataRequest& /* req */) { + return; + } + void put_file_metadata(PutFileMetadataResult& /* _return */, const PutFileMetadataRequest& /* req */) { + return; + } + void clear_file_metadata(ClearFileMetadataResult& /* _return */, const ClearFileMetadataRequest& /* req */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -18193,6 +18213,576 @@ class ThriftHiveMetastore_fire_listener_event_presult { friend std::ostream& operator<<(std::ostream& out, 
const ThriftHiveMetastore_fire_listener_event_presult& obj); }; + +class ThriftHiveMetastore_flushCache_args { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + ThriftHiveMetastore_flushCache_args(const ThriftHiveMetastore_flushCache_args&); + ThriftHiveMetastore_flushCache_args& operator=(const ThriftHiveMetastore_flushCache_args&); + ThriftHiveMetastore_flushCache_args() { + } + + virtual ~ThriftHiveMetastore_flushCache_args() throw(); + + bool operator == (const ThriftHiveMetastore_flushCache_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_flushCache_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_flushCache_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_args& obj); +}; + + +class ThriftHiveMetastore_flushCache_pargs { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + + virtual ~ThriftHiveMetastore_flushCache_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_pargs& obj); +}; + + +class ThriftHiveMetastore_flushCache_result { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + ThriftHiveMetastore_flushCache_result(const ThriftHiveMetastore_flushCache_result&); + ThriftHiveMetastore_flushCache_result& operator=(const ThriftHiveMetastore_flushCache_result&); + ThriftHiveMetastore_flushCache_result() { + } + + virtual ~ThriftHiveMetastore_flushCache_result() throw(); + + bool operator == (const ThriftHiveMetastore_flushCache_result & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_flushCache_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_flushCache_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_result& obj); +}; + + +class ThriftHiveMetastore_flushCache_presult { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + + virtual ~ThriftHiveMetastore_flushCache_presult() throw(); + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_presult& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_by_expr_args__isset { + _ThriftHiveMetastore_get_file_metadata_by_expr_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_file_metadata_by_expr_args__isset; + +class 
ThriftHiveMetastore_get_file_metadata_by_expr_args { + public: + + static const char* ascii_fingerprint; // = "35F3A2DA650F5293300EA6DB58284F86"; + static const uint8_t binary_fingerprint[16]; // = {0x35,0xF3,0xA2,0xDA,0x65,0x0F,0x52,0x93,0x30,0x0E,0xA6,0xDB,0x58,0x28,0x4F,0x86}; + + ThriftHiveMetastore_get_file_metadata_by_expr_args(const ThriftHiveMetastore_get_file_metadata_by_expr_args&); + ThriftHiveMetastore_get_file_metadata_by_expr_args& operator=(const ThriftHiveMetastore_get_file_metadata_by_expr_args&); + ThriftHiveMetastore_get_file_metadata_by_expr_args() { + } + + virtual ~ThriftHiveMetastore_get_file_metadata_by_expr_args() throw(); + GetFileMetadataByExprRequest req; + + _ThriftHiveMetastore_get_file_metadata_by_expr_args__isset __isset; + + void __set_req(const GetFileMetadataByExprRequest& val); + + bool operator == (const ThriftHiveMetastore_get_file_metadata_by_expr_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_file_metadata_by_expr_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_file_metadata_by_expr_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_by_expr_args& obj); +}; + + +class ThriftHiveMetastore_get_file_metadata_by_expr_pargs { + public: + + static const char* ascii_fingerprint; // = "35F3A2DA650F5293300EA6DB58284F86"; + static const uint8_t binary_fingerprint[16]; // = {0x35,0xF3,0xA2,0xDA,0x65,0x0F,0x52,0x93,0x30,0x0E,0xA6,0xDB,0x58,0x28,0x4F,0x86}; + + + virtual ~ThriftHiveMetastore_get_file_metadata_by_expr_pargs() throw(); + const GetFileMetadataByExprRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_by_expr_pargs& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_by_expr_result__isset { + _ThriftHiveMetastore_get_file_metadata_by_expr_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_file_metadata_by_expr_result__isset; + +class ThriftHiveMetastore_get_file_metadata_by_expr_result { + public: + + static const char* ascii_fingerprint; // = "E2053E1FBA55841322D49B2FBE16E310"; + static const uint8_t binary_fingerprint[16]; // = {0xE2,0x05,0x3E,0x1F,0xBA,0x55,0x84,0x13,0x22,0xD4,0x9B,0x2F,0xBE,0x16,0xE3,0x10}; + + ThriftHiveMetastore_get_file_metadata_by_expr_result(const ThriftHiveMetastore_get_file_metadata_by_expr_result&); + ThriftHiveMetastore_get_file_metadata_by_expr_result& operator=(const ThriftHiveMetastore_get_file_metadata_by_expr_result&); + ThriftHiveMetastore_get_file_metadata_by_expr_result() { + } + + virtual ~ThriftHiveMetastore_get_file_metadata_by_expr_result() throw(); + GetFileMetadataByExprResult success; + + _ThriftHiveMetastore_get_file_metadata_by_expr_result__isset __isset; + + void __set_success(const GetFileMetadataByExprResult& val); + + bool operator == (const ThriftHiveMetastore_get_file_metadata_by_expr_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_file_metadata_by_expr_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_file_metadata_by_expr_result & ) const; + + 
uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_by_expr_result& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_by_expr_presult__isset { + _ThriftHiveMetastore_get_file_metadata_by_expr_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_file_metadata_by_expr_presult__isset; + +class ThriftHiveMetastore_get_file_metadata_by_expr_presult { + public: + + static const char* ascii_fingerprint; // = "E2053E1FBA55841322D49B2FBE16E310"; + static const uint8_t binary_fingerprint[16]; // = {0xE2,0x05,0x3E,0x1F,0xBA,0x55,0x84,0x13,0x22,0xD4,0x9B,0x2F,0xBE,0x16,0xE3,0x10}; + + + virtual ~ThriftHiveMetastore_get_file_metadata_by_expr_presult() throw(); + GetFileMetadataByExprResult* success; + + _ThriftHiveMetastore_get_file_metadata_by_expr_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_by_expr_presult& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_args__isset { + _ThriftHiveMetastore_get_file_metadata_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_file_metadata_args__isset; + +class ThriftHiveMetastore_get_file_metadata_args { + public: + + static const char* ascii_fingerprint; // = "1BC0267F37F033AE9EEA2EB33C70733E"; + static const uint8_t binary_fingerprint[16]; // = {0x1B,0xC0,0x26,0x7F,0x37,0xF0,0x33,0xAE,0x9E,0xEA,0x2E,0xB3,0x3C,0x70,0x73,0x3E}; + + ThriftHiveMetastore_get_file_metadata_args(const ThriftHiveMetastore_get_file_metadata_args&); + ThriftHiveMetastore_get_file_metadata_args& operator=(const ThriftHiveMetastore_get_file_metadata_args&); + ThriftHiveMetastore_get_file_metadata_args() { + } + + virtual ~ThriftHiveMetastore_get_file_metadata_args() throw(); + GetFileMetadataRequest req; + + _ThriftHiveMetastore_get_file_metadata_args__isset __isset; + + void __set_req(const GetFileMetadataRequest& val); + + bool operator == (const ThriftHiveMetastore_get_file_metadata_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_file_metadata_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_file_metadata_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_args& obj); +}; + + +class ThriftHiveMetastore_get_file_metadata_pargs { + public: + + static const char* ascii_fingerprint; // = "1BC0267F37F033AE9EEA2EB33C70733E"; + static const uint8_t binary_fingerprint[16]; // = {0x1B,0xC0,0x26,0x7F,0x37,0xF0,0x33,0xAE,0x9E,0xEA,0x2E,0xB3,0x3C,0x70,0x73,0x3E}; + + + virtual ~ThriftHiveMetastore_get_file_metadata_pargs() throw(); + const GetFileMetadataRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_pargs& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_result__isset { + _ThriftHiveMetastore_get_file_metadata_result__isset() : success(false) {} + bool success :1; +} 
_ThriftHiveMetastore_get_file_metadata_result__isset; + +class ThriftHiveMetastore_get_file_metadata_result { + public: + + static const char* ascii_fingerprint; // = "2E68003888122322D29FD7969DAE8C2A"; + static const uint8_t binary_fingerprint[16]; // = {0x2E,0x68,0x00,0x38,0x88,0x12,0x23,0x22,0xD2,0x9F,0xD7,0x96,0x9D,0xAE,0x8C,0x2A}; + + ThriftHiveMetastore_get_file_metadata_result(const ThriftHiveMetastore_get_file_metadata_result&); + ThriftHiveMetastore_get_file_metadata_result& operator=(const ThriftHiveMetastore_get_file_metadata_result&); + ThriftHiveMetastore_get_file_metadata_result() { + } + + virtual ~ThriftHiveMetastore_get_file_metadata_result() throw(); + GetFileMetadataResult success; + + _ThriftHiveMetastore_get_file_metadata_result__isset __isset; + + void __set_success(const GetFileMetadataResult& val); + + bool operator == (const ThriftHiveMetastore_get_file_metadata_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_file_metadata_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_file_metadata_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_result& obj); +}; + +typedef struct _ThriftHiveMetastore_get_file_metadata_presult__isset { + _ThriftHiveMetastore_get_file_metadata_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_file_metadata_presult__isset; + +class ThriftHiveMetastore_get_file_metadata_presult { + public: + + static const char* ascii_fingerprint; // = "2E68003888122322D29FD7969DAE8C2A"; + static const uint8_t binary_fingerprint[16]; // = {0x2E,0x68,0x00,0x38,0x88,0x12,0x23,0x22,0xD2,0x9F,0xD7,0x96,0x9D,0xAE,0x8C,0x2A}; + + + virtual ~ThriftHiveMetastore_get_file_metadata_presult() throw(); + GetFileMetadataResult* success; + + _ThriftHiveMetastore_get_file_metadata_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_get_file_metadata_presult& obj); +}; + +typedef struct _ThriftHiveMetastore_put_file_metadata_args__isset { + _ThriftHiveMetastore_put_file_metadata_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_put_file_metadata_args__isset; + +class ThriftHiveMetastore_put_file_metadata_args { + public: + + static const char* ascii_fingerprint; // = "CC8035DE851F68540899C9E7BDA51238"; + static const uint8_t binary_fingerprint[16]; // = {0xCC,0x80,0x35,0xDE,0x85,0x1F,0x68,0x54,0x08,0x99,0xC9,0xE7,0xBD,0xA5,0x12,0x38}; + + ThriftHiveMetastore_put_file_metadata_args(const ThriftHiveMetastore_put_file_metadata_args&); + ThriftHiveMetastore_put_file_metadata_args& operator=(const ThriftHiveMetastore_put_file_metadata_args&); + ThriftHiveMetastore_put_file_metadata_args() { + } + + virtual ~ThriftHiveMetastore_put_file_metadata_args() throw(); + PutFileMetadataRequest req; + + _ThriftHiveMetastore_put_file_metadata_args__isset __isset; + + void __set_req(const PutFileMetadataRequest& val); + + bool operator == (const ThriftHiveMetastore_put_file_metadata_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_put_file_metadata_args &rhs) const { + return !(*this == rhs); + } + + bool 
operator < (const ThriftHiveMetastore_put_file_metadata_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_put_file_metadata_args& obj); +}; + + +class ThriftHiveMetastore_put_file_metadata_pargs { + public: + + static const char* ascii_fingerprint; // = "CC8035DE851F68540899C9E7BDA51238"; + static const uint8_t binary_fingerprint[16]; // = {0xCC,0x80,0x35,0xDE,0x85,0x1F,0x68,0x54,0x08,0x99,0xC9,0xE7,0xBD,0xA5,0x12,0x38}; + + + virtual ~ThriftHiveMetastore_put_file_metadata_pargs() throw(); + const PutFileMetadataRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_put_file_metadata_pargs& obj); +}; + +typedef struct _ThriftHiveMetastore_put_file_metadata_result__isset { + _ThriftHiveMetastore_put_file_metadata_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_put_file_metadata_result__isset; + +class ThriftHiveMetastore_put_file_metadata_result { + public: + + static const char* ascii_fingerprint; // = "3A26B8DD823AB72F2FE94404F3D36070"; + static const uint8_t binary_fingerprint[16]; // = {0x3A,0x26,0xB8,0xDD,0x82,0x3A,0xB7,0x2F,0x2F,0xE9,0x44,0x04,0xF3,0xD3,0x60,0x70}; + + ThriftHiveMetastore_put_file_metadata_result(const ThriftHiveMetastore_put_file_metadata_result&); + ThriftHiveMetastore_put_file_metadata_result& operator=(const ThriftHiveMetastore_put_file_metadata_result&); + ThriftHiveMetastore_put_file_metadata_result() { + } + + virtual ~ThriftHiveMetastore_put_file_metadata_result() throw(); + PutFileMetadataResult success; + + _ThriftHiveMetastore_put_file_metadata_result__isset __isset; + + void __set_success(const PutFileMetadataResult& val); + + bool operator == (const ThriftHiveMetastore_put_file_metadata_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_put_file_metadata_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_put_file_metadata_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_put_file_metadata_result& obj); +}; + +typedef struct _ThriftHiveMetastore_put_file_metadata_presult__isset { + _ThriftHiveMetastore_put_file_metadata_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_put_file_metadata_presult__isset; + +class ThriftHiveMetastore_put_file_metadata_presult { + public: + + static const char* ascii_fingerprint; // = "3A26B8DD823AB72F2FE94404F3D36070"; + static const uint8_t binary_fingerprint[16]; // = {0x3A,0x26,0xB8,0xDD,0x82,0x3A,0xB7,0x2F,0x2F,0xE9,0x44,0x04,0xF3,0xD3,0x60,0x70}; + + + virtual ~ThriftHiveMetastore_put_file_metadata_presult() throw(); + PutFileMetadataResult* success; + + _ThriftHiveMetastore_put_file_metadata_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_put_file_metadata_presult& obj); +}; + +typedef struct _ThriftHiveMetastore_clear_file_metadata_args__isset { + _ThriftHiveMetastore_clear_file_metadata_args__isset() : req(false) {} + bool req :1; +} 
_ThriftHiveMetastore_clear_file_metadata_args__isset; + +class ThriftHiveMetastore_clear_file_metadata_args { + public: + + static const char* ascii_fingerprint; // = "1BC0267F37F033AE9EEA2EB33C70733E"; + static const uint8_t binary_fingerprint[16]; // = {0x1B,0xC0,0x26,0x7F,0x37,0xF0,0x33,0xAE,0x9E,0xEA,0x2E,0xB3,0x3C,0x70,0x73,0x3E}; + + ThriftHiveMetastore_clear_file_metadata_args(const ThriftHiveMetastore_clear_file_metadata_args&); + ThriftHiveMetastore_clear_file_metadata_args& operator=(const ThriftHiveMetastore_clear_file_metadata_args&); + ThriftHiveMetastore_clear_file_metadata_args() { + } + + virtual ~ThriftHiveMetastore_clear_file_metadata_args() throw(); + ClearFileMetadataRequest req; + + _ThriftHiveMetastore_clear_file_metadata_args__isset __isset; + + void __set_req(const ClearFileMetadataRequest& val); + + bool operator == (const ThriftHiveMetastore_clear_file_metadata_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_clear_file_metadata_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_clear_file_metadata_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_clear_file_metadata_args& obj); +}; + + +class ThriftHiveMetastore_clear_file_metadata_pargs { + public: + + static const char* ascii_fingerprint; // = "1BC0267F37F033AE9EEA2EB33C70733E"; + static const uint8_t binary_fingerprint[16]; // = {0x1B,0xC0,0x26,0x7F,0x37,0xF0,0x33,0xAE,0x9E,0xEA,0x2E,0xB3,0x3C,0x70,0x73,0x3E}; + + + virtual ~ThriftHiveMetastore_clear_file_metadata_pargs() throw(); + const ClearFileMetadataRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_clear_file_metadata_pargs& obj); +}; + +typedef struct _ThriftHiveMetastore_clear_file_metadata_result__isset { + _ThriftHiveMetastore_clear_file_metadata_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_clear_file_metadata_result__isset; + +class ThriftHiveMetastore_clear_file_metadata_result { + public: + + static const char* ascii_fingerprint; // = "3A26B8DD823AB72F2FE94404F3D36070"; + static const uint8_t binary_fingerprint[16]; // = {0x3A,0x26,0xB8,0xDD,0x82,0x3A,0xB7,0x2F,0x2F,0xE9,0x44,0x04,0xF3,0xD3,0x60,0x70}; + + ThriftHiveMetastore_clear_file_metadata_result(const ThriftHiveMetastore_clear_file_metadata_result&); + ThriftHiveMetastore_clear_file_metadata_result& operator=(const ThriftHiveMetastore_clear_file_metadata_result&); + ThriftHiveMetastore_clear_file_metadata_result() { + } + + virtual ~ThriftHiveMetastore_clear_file_metadata_result() throw(); + ClearFileMetadataResult success; + + _ThriftHiveMetastore_clear_file_metadata_result__isset __isset; + + void __set_success(const ClearFileMetadataResult& val); + + bool operator == (const ThriftHiveMetastore_clear_file_metadata_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_clear_file_metadata_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_clear_file_metadata_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + 
friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_clear_file_metadata_result& obj); +}; + +typedef struct _ThriftHiveMetastore_clear_file_metadata_presult__isset { + _ThriftHiveMetastore_clear_file_metadata_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_clear_file_metadata_presult__isset; + +class ThriftHiveMetastore_clear_file_metadata_presult { + public: + + static const char* ascii_fingerprint; // = "3A26B8DD823AB72F2FE94404F3D36070"; + static const uint8_t binary_fingerprint[16]; // = {0x3A,0x26,0xB8,0xDD,0x82,0x3A,0xB7,0x2F,0x2F,0xE9,0x44,0x04,0xF3,0xD3,0x60,0x70}; + + + virtual ~ThriftHiveMetastore_clear_file_metadata_presult() throw(); + ClearFileMetadataResult* success; + + _ThriftHiveMetastore_clear_file_metadata_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + + friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_clear_file_metadata_presult& obj); +}; + class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { public: ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : @@ -18576,6 +19166,21 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst); void send_fire_listener_event(const FireEventRequest& rqst); void recv_fire_listener_event(FireEventResponse& _return); + void flushCache(); + void send_flushCache(); + void recv_flushCache(); + void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req); + void send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req); + void recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return); + void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req); + void send_get_file_metadata(const GetFileMetadataRequest& req); + void recv_get_file_metadata(GetFileMetadataResult& _return); + void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req); + void send_put_file_metadata(const PutFileMetadataRequest& req); + void recv_put_file_metadata(PutFileMetadataResult& _return); + void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req); + void send_clear_file_metadata(const ClearFileMetadataRequest& req); + void recv_clear_file_metadata(ClearFileMetadataResult& _return); }; class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor { @@ -18710,6 +19315,11 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_fire_listener_event(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* 
callContext); + void process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); public: ThriftHiveMetastoreProcessor(boost::shared_ptr iface) : ::facebook::fb303::FacebookServiceProcessor(iface), @@ -18838,6 +19448,11 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_next_notification"] = &ThriftHiveMetastoreProcessor::process_get_next_notification; processMap_["get_current_notificationEventId"] = &ThriftHiveMetastoreProcessor::process_get_current_notificationEventId; processMap_["fire_listener_event"] = &ThriftHiveMetastoreProcessor::process_fire_listener_event; + processMap_["flushCache"] = &ThriftHiveMetastoreProcessor::process_flushCache; + processMap_["get_file_metadata_by_expr"] = &ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr; + processMap_["get_file_metadata"] = &ThriftHiveMetastoreProcessor::process_get_file_metadata; + processMap_["put_file_metadata"] = &ThriftHiveMetastoreProcessor::process_put_file_metadata; + processMap_["clear_file_metadata"] = &ThriftHiveMetastoreProcessor::process_clear_file_metadata; } virtual ~ThriftHiveMetastoreProcessor() {} @@ -20061,6 +20676,55 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void flushCache() { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->flushCache(); + } + ifaces_[i]->flushCache(); + } + + void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_file_metadata_by_expr(_return, req); + } + ifaces_[i]->get_file_metadata_by_expr(_return, req); + return; + } + + void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_file_metadata(_return, req); + } + ifaces_[i]->get_file_metadata(_return, req); + return; + } + + void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->put_file_metadata(_return, req); + } + ifaces_[i]->put_file_metadata(_return, req); + return; + } + + void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->clear_file_metadata(_return, req); + } + ifaces_[i]->clear_file_metadata(_return, req); + return; + } + }; }}} // namespace diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 0afcf8c..9eca65c 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -642,6 +642,31 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("fire_listener_event\n"); } + void flushCache() { + // Your 
implementation goes here + printf("flushCache\n"); + } + + void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { + // Your implementation goes here + printf("get_file_metadata_by_expr\n"); + } + + void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { + // Your implementation goes here + printf("get_file_metadata\n"); + } + + void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { + // Your implementation goes here + printf("put_file_metadata\n"); + } + + void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { + // Your implementation goes here + printf("clear_file_metadata\n"); + } + }; int main(int argc, char **argv) { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 57c3477..e61ba1b 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -14373,6 +14373,1076 @@ std::ostream& operator<<(std::ostream& out, const FireEventResponse& obj) { } +MetadataPpdResult::~MetadataPpdResult() throw() { +} + + +void MetadataPpdResult::__set_metadata(const std::string& val) { + this->metadata = val; +} + +void MetadataPpdResult::__set_includeBitset(const std::string& val) { + this->includeBitset = val; +} + +const char* MetadataPpdResult::ascii_fingerprint = "07A9615F837F7D0A952B595DD3020972"; +const uint8_t MetadataPpdResult::binary_fingerprint[16] = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; + +uint32_t MetadataPpdResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_metadata = false; + bool isset_includeBitset = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readBinary(this->metadata); + isset_metadata = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readBinary(this->includeBitset); + isset_includeBitset = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_metadata) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_includeBitset) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t MetadataPpdResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("MetadataPpdResult"); + + xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeBinary(this->metadata); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("includeBitset", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeBinary(this->includeBitset); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + 
oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(MetadataPpdResult &a, MetadataPpdResult &b) { + using ::std::swap; + swap(a.metadata, b.metadata); + swap(a.includeBitset, b.includeBitset); +} + +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other609) { + metadata = other609.metadata; + includeBitset = other609.includeBitset; +} +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other610) { + metadata = other610.metadata; + includeBitset = other610.includeBitset; + return *this; +} +std::ostream& operator<<(std::ostream& out, const MetadataPpdResult& obj) { + using apache::thrift::to_string; + out << "MetadataPpdResult("; + out << "metadata=" << to_string(obj.metadata); + out << ", " << "includeBitset=" << to_string(obj.includeBitset); + out << ")"; + return out; +} + + +GetFileMetadataByExprResult::~GetFileMetadataByExprResult() throw() { +} + + +void GetFileMetadataByExprResult::__set_metadata(const std::map & val) { + this->metadata = val; +} + +void GetFileMetadataByExprResult::__set_isSupported(const bool val) { + this->isSupported = val; +} + +void GetFileMetadataByExprResult::__set_unknownFileIds(const std::vector & val) { + this->unknownFileIds = val; +} + +const char* GetFileMetadataByExprResult::ascii_fingerprint = "2B0C1B8D7599529A5797481BE308375D"; +const uint8_t GetFileMetadataByExprResult::binary_fingerprint[16] = {0x2B,0x0C,0x1B,0x8D,0x75,0x99,0x52,0x9A,0x57,0x97,0x48,0x1B,0xE3,0x08,0x37,0x5D}; + +uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_metadata = false; + bool isset_isSupported = false; + bool isset_unknownFileIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->metadata.clear(); + uint32_t _size611; + ::apache::thrift::protocol::TType _ktype612; + ::apache::thrift::protocol::TType _vtype613; + xfer += iprot->readMapBegin(_ktype612, _vtype613, _size611); + uint32_t _i615; + for (_i615 = 0; _i615 < _size611; ++_i615) + { + int64_t _key616; + xfer += iprot->readI64(_key616); + MetadataPpdResult& _val617 = this->metadata[_key616]; + xfer += _val617.read(iprot); + } + xfer += iprot->readMapEnd(); + } + isset_metadata = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isSupported); + isset_isSupported = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->unknownFileIds.clear(); + uint32_t _size618; + ::apache::thrift::protocol::TType _etype621; + xfer += iprot->readListBegin(_etype621, _size618); + this->unknownFileIds.resize(_size618); + uint32_t _i622; + for (_i622 = 0; _i622 < _size618; ++_i622) + { + xfer += iprot->readI64(this->unknownFileIds[_i622]); + } + xfer += iprot->readListEnd(); + } + isset_unknownFileIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_metadata) + throw 
TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_isSupported) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_unknownFileIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("GetFileMetadataByExprResult"); + + xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); + std::map ::const_iterator _iter623; + for (_iter623 = this->metadata.begin(); _iter623 != this->metadata.end(); ++_iter623) + { + xfer += oprot->writeI64(_iter623->first); + xfer += _iter623->second.write(oprot); + } + xfer += oprot->writeMapEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("isSupported", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isSupported); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("unknownFileIds", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->unknownFileIds.size())); + std::vector ::const_iterator _iter624; + for (_iter624 = this->unknownFileIds.begin(); _iter624 != this->unknownFileIds.end(); ++_iter624) + { + xfer += oprot->writeI64((*_iter624)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { + using ::std::swap; + swap(a.metadata, b.metadata); + swap(a.isSupported, b.isSupported); + swap(a.unknownFileIds, b.unknownFileIds); +} + +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other625) { + metadata = other625.metadata; + isSupported = other625.isSupported; + unknownFileIds = other625.unknownFileIds; +} +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other626) { + metadata = other626.metadata; + isSupported = other626.isSupported; + unknownFileIds = other626.unknownFileIds; + return *this; +} +std::ostream& operator<<(std::ostream& out, const GetFileMetadataByExprResult& obj) { + using apache::thrift::to_string; + out << "GetFileMetadataByExprResult("; + out << "metadata=" << to_string(obj.metadata); + out << ", " << "isSupported=" << to_string(obj.isSupported); + out << ", " << "unknownFileIds=" << to_string(obj.unknownFileIds); + out << ")"; + return out; +} + + +GetFileMetadataByExprRequest::~GetFileMetadataByExprRequest() throw() { +} + + +void GetFileMetadataByExprRequest::__set_fileIds(const std::vector & val) { + this->fileIds = val; +} + +void GetFileMetadataByExprRequest::__set_expr(const std::string& val) { + this->expr = val; +} + +const char* GetFileMetadataByExprRequest::ascii_fingerprint = "925353917FC0AF87976A2338011F5A31"; +const uint8_t GetFileMetadataByExprRequest::binary_fingerprint[16] = {0x92,0x53,0x53,0x91,0x7F,0xC0,0xAF,0x87,0x97,0x6A,0x23,0x38,0x01,0x1F,0x5A,0x31}; + +uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t 
fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_fileIds = false; + bool isset_expr = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->fileIds.clear(); + uint32_t _size627; + ::apache::thrift::protocol::TType _etype630; + xfer += iprot->readListBegin(_etype630, _size627); + this->fileIds.resize(_size627); + uint32_t _i631; + for (_i631 = 0; _i631 < _size627; ++_i631) + { + xfer += iprot->readI64(this->fileIds[_i631]); + } + xfer += iprot->readListEnd(); + } + isset_fileIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readBinary(this->expr); + isset_expr = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_fileIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_expr) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("GetFileMetadataByExprRequest"); + + xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); + std::vector ::const_iterator _iter632; + for (_iter632 = this->fileIds.begin(); _iter632 != this->fileIds.end(); ++_iter632) + { + xfer += oprot->writeI64((*_iter632)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("expr", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeBinary(this->expr); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { + using ::std::swap; + swap(a.fileIds, b.fileIds); + swap(a.expr, b.expr); +} + +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other633) { + fileIds = other633.fileIds; + expr = other633.expr; +} +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other634) { + fileIds = other634.fileIds; + expr = other634.expr; + return *this; +} +std::ostream& operator<<(std::ostream& out, const GetFileMetadataByExprRequest& obj) { + using apache::thrift::to_string; + out << "GetFileMetadataByExprRequest("; + out << "fileIds=" << to_string(obj.fileIds); + out << ", " << "expr=" << to_string(obj.expr); + out << ")"; + return out; +} + + +GetFileMetadataResult::~GetFileMetadataResult() throw() { +} + + +void GetFileMetadataResult::__set_metadata(const std::map & val) { + this->metadata = val; +} + +void GetFileMetadataResult::__set_isSupported(const bool val) { + this->isSupported = val; +} + +const char* GetFileMetadataResult::ascii_fingerprint = "D18BCBD4BA945E7F6500F5CD95205706"; +const uint8_t GetFileMetadataResult::binary_fingerprint[16] = 
{0xD1,0x8B,0xCB,0xD4,0xBA,0x94,0x5E,0x7F,0x65,0x00,0xF5,0xCD,0x95,0x20,0x57,0x06}; + +uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_metadata = false; + bool isset_isSupported = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->metadata.clear(); + uint32_t _size635; + ::apache::thrift::protocol::TType _ktype636; + ::apache::thrift::protocol::TType _vtype637; + xfer += iprot->readMapBegin(_ktype636, _vtype637, _size635); + uint32_t _i639; + for (_i639 = 0; _i639 < _size635; ++_i639) + { + int64_t _key640; + xfer += iprot->readI64(_key640); + std::string& _val641 = this->metadata[_key640]; + xfer += iprot->readBinary(_val641); + } + xfer += iprot->readMapEnd(); + } + isset_metadata = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->isSupported); + isset_isSupported = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_metadata) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_isSupported) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("GetFileMetadataResult"); + + xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); + std::map ::const_iterator _iter642; + for (_iter642 = this->metadata.begin(); _iter642 != this->metadata.end(); ++_iter642) + { + xfer += oprot->writeI64(_iter642->first); + xfer += oprot->writeBinary(_iter642->second); + } + xfer += oprot->writeMapEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("isSupported", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->isSupported); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { + using ::std::swap; + swap(a.metadata, b.metadata); + swap(a.isSupported, b.isSupported); +} + +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other643) { + metadata = other643.metadata; + isSupported = other643.isSupported; +} +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other644) { + metadata = other644.metadata; + isSupported = other644.isSupported; + return *this; +} +std::ostream& operator<<(std::ostream& out, const GetFileMetadataResult& obj) { + using apache::thrift::to_string; + out << "GetFileMetadataResult("; + out << "metadata=" << to_string(obj.metadata); + out << ", " << "isSupported=" << to_string(obj.isSupported); + out << ")"; + return out; +} + + 
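[Editor's illustrative sketch, not part of the generated patch.] For orientation, here is a minimal C++ client sketch driving the new file-metadata calls through the generated ThriftHiveMetastoreClient declared earlier in this patch. The endpoint localhost:9083, the Apache::Hadoop::Hive C++ namespace, and the blob contents are assumptions for illustration; TException/error handling is omitted.

#include <iostream>
#include <map>
#include <string>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TTransport.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;  // assumed C++ namespace of the generated code

int main() {
  // Assumed endpoint: a metastore Thrift server on localhost:9083.
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);
  transport->open();

  // Cache two opaque metadata blobs keyed by file id.
  std::vector<int64_t> fileIds;
  fileIds.push_back(1);
  fileIds.push_back(2);
  std::vector<std::string> blobs;
  blobs.push_back("blob-for-file-1");
  blobs.push_back("blob-for-file-2");

  PutFileMetadataRequest putReq;
  putReq.__set_fileIds(fileIds);
  putReq.__set_metadata(blobs);
  PutFileMetadataResult putRes;
  client.put_file_metadata(putRes, putReq);

  // Read the blobs back; isSupported is false if the backing store
  // cannot serve file metadata at all.
  GetFileMetadataRequest getReq;
  getReq.__set_fileIds(fileIds);
  GetFileMetadataResult getRes;
  client.get_file_metadata(getRes, getReq);
  if (getRes.isSupported) {
    std::map<int64_t, std::string>::const_iterator it;
    for (it = getRes.metadata.begin(); it != getRes.metadata.end(); ++it) {
      std::cout << "file " << it->first << " -> "
                << it->second.size() << " bytes of metadata" << std::endl;
    }
  }

  // Remove the cached entries, then flush metastore-side caches.
  ClearFileMetadataRequest clearReq;
  clearReq.__set_fileIds(fileIds);
  ClearFileMetadataResult clearRes;
  client.clear_file_metadata(clearRes, clearReq);
  client.flushCache();

  transport->close();
  return 0;
}

The request/result shapes above follow the generated readers and writers in this patch (fileIds as a list of i64, metadata blobs as binary strings, results keyed by file id); only the connection details and sample values are invented.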
+GetFileMetadataRequest::~GetFileMetadataRequest() throw() { +} + + +void GetFileMetadataRequest::__set_fileIds(const std::vector & val) { + this->fileIds = val; +} + +const char* GetFileMetadataRequest::ascii_fingerprint = "E49D7D1A9013CC81CD0F69D631EF82E4"; +const uint8_t GetFileMetadataRequest::binary_fingerprint[16] = {0xE4,0x9D,0x7D,0x1A,0x90,0x13,0xCC,0x81,0xCD,0x0F,0x69,0xD6,0x31,0xEF,0x82,0xE4}; + +uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_fileIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->fileIds.clear(); + uint32_t _size645; + ::apache::thrift::protocol::TType _etype648; + xfer += iprot->readListBegin(_etype648, _size645); + this->fileIds.resize(_size645); + uint32_t _i649; + for (_i649 = 0; _i649 < _size645; ++_i649) + { + xfer += iprot->readI64(this->fileIds[_i649]); + } + xfer += iprot->readListEnd(); + } + isset_fileIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_fileIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("GetFileMetadataRequest"); + + xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); + std::vector ::const_iterator _iter650; + for (_iter650 = this->fileIds.begin(); _iter650 != this->fileIds.end(); ++_iter650) + { + xfer += oprot->writeI64((*_iter650)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { + using ::std::swap; + swap(a.fileIds, b.fileIds); +} + +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other651) { + fileIds = other651.fileIds; +} +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other652) { + fileIds = other652.fileIds; + return *this; +} +std::ostream& operator<<(std::ostream& out, const GetFileMetadataRequest& obj) { + using apache::thrift::to_string; + out << "GetFileMetadataRequest("; + out << "fileIds=" << to_string(obj.fileIds); + out << ")"; + return out; +} + + +PutFileMetadataResult::~PutFileMetadataResult() throw() { +} + + +const char* PutFileMetadataResult::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B"; +const uint8_t PutFileMetadataResult::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + +uint32_t PutFileMetadataResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + 
using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t PutFileMetadataResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("PutFileMetadataResult"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { + using ::std::swap; + (void) a; + (void) b; +} + +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other653) { + (void) other653; +} +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other654) { + (void) other654; + return *this; +} +std::ostream& operator<<(std::ostream& out, const PutFileMetadataResult& obj) { + using apache::thrift::to_string; + (void) obj; + out << "PutFileMetadataResult("; + out << ")"; + return out; +} + + +PutFileMetadataRequest::~PutFileMetadataRequest() throw() { +} + + +void PutFileMetadataRequest::__set_fileIds(const std::vector & val) { + this->fileIds = val; +} + +void PutFileMetadataRequest::__set_metadata(const std::vector & val) { + this->metadata = val; +} + +const char* PutFileMetadataRequest::ascii_fingerprint = "D64A208A8BCFCE146F4E2CB2176A807C"; +const uint8_t PutFileMetadataRequest::binary_fingerprint[16] = {0xD6,0x4A,0x20,0x8A,0x8B,0xCF,0xCE,0x14,0x6F,0x4E,0x2C,0xB2,0x17,0x6A,0x80,0x7C}; + +uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_fileIds = false; + bool isset_metadata = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->fileIds.clear(); + uint32_t _size655; + ::apache::thrift::protocol::TType _etype658; + xfer += iprot->readListBegin(_etype658, _size655); + this->fileIds.resize(_size655); + uint32_t _i659; + for (_i659 = 0; _i659 < _size655; ++_i659) + { + xfer += iprot->readI64(this->fileIds[_i659]); + } + xfer += iprot->readListEnd(); + } + isset_fileIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->metadata.clear(); + uint32_t _size660; + ::apache::thrift::protocol::TType _etype663; + xfer += iprot->readListBegin(_etype663, _size660); + this->metadata.resize(_size660); + uint32_t _i664; + for (_i664 = 0; _i664 < _size660; ++_i664) + { + xfer += iprot->readBinary(this->metadata[_i664]); + } + xfer += iprot->readListEnd(); + } + isset_metadata = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_fileIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_metadata) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t 
PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("PutFileMetadataRequest"); + + xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); + std::vector ::const_iterator _iter665; + for (_iter665 = this->fileIds.begin(); _iter665 != this->fileIds.end(); ++_iter665) + { + xfer += oprot->writeI64((*_iter665)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); + std::vector ::const_iterator _iter666; + for (_iter666 = this->metadata.begin(); _iter666 != this->metadata.end(); ++_iter666) + { + xfer += oprot->writeBinary((*_iter666)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { + using ::std::swap; + swap(a.fileIds, b.fileIds); + swap(a.metadata, b.metadata); +} + +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other667) { + fileIds = other667.fileIds; + metadata = other667.metadata; +} +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other668) { + fileIds = other668.fileIds; + metadata = other668.metadata; + return *this; +} +std::ostream& operator<<(std::ostream& out, const PutFileMetadataRequest& obj) { + using apache::thrift::to_string; + out << "PutFileMetadataRequest("; + out << "fileIds=" << to_string(obj.fileIds); + out << ", " << "metadata=" << to_string(obj.metadata); + out << ")"; + return out; +} + + +ClearFileMetadataResult::~ClearFileMetadataResult() throw() { +} + + +const char* ClearFileMetadataResult::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B"; +const uint8_t ClearFileMetadataResult::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + +uint32_t ClearFileMetadataResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ClearFileMetadataResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ClearFileMetadataResult"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { + using ::std::swap; + (void) a; + (void) b; +} + +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other669) { + (void) other669; +} +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other670) { + 
(void) other670; + return *this; +} +std::ostream& operator<<(std::ostream& out, const ClearFileMetadataResult& obj) { + using apache::thrift::to_string; + (void) obj; + out << "ClearFileMetadataResult("; + out << ")"; + return out; +} + + +ClearFileMetadataRequest::~ClearFileMetadataRequest() throw() { +} + + +void ClearFileMetadataRequest::__set_fileIds(const std::vector & val) { + this->fileIds = val; +} + +const char* ClearFileMetadataRequest::ascii_fingerprint = "E49D7D1A9013CC81CD0F69D631EF82E4"; +const uint8_t ClearFileMetadataRequest::binary_fingerprint[16] = {0xE4,0x9D,0x7D,0x1A,0x90,0x13,0xCC,0x81,0xCD,0x0F,0x69,0xD6,0x31,0xEF,0x82,0xE4}; + +uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_fileIds = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->fileIds.clear(); + uint32_t _size671; + ::apache::thrift::protocol::TType _etype674; + xfer += iprot->readListBegin(_etype674, _size671); + this->fileIds.resize(_size671); + uint32_t _i675; + for (_i675 = 0; _i675 < _size671; ++_i675) + { + xfer += iprot->readI64(this->fileIds[_i675]); + } + xfer += iprot->readListEnd(); + } + isset_fileIds = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_fileIds) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + oprot->incrementRecursionDepth(); + xfer += oprot->writeStructBegin("ClearFileMetadataRequest"); + + xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); + std::vector ::const_iterator _iter676; + for (_iter676 = this->fileIds.begin(); _iter676 != this->fileIds.end(); ++_iter676) + { + xfer += oprot->writeI64((*_iter676)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + oprot->decrementRecursionDepth(); + return xfer; +} + +void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { + using ::std::swap; + swap(a.fileIds, b.fileIds); +} + +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other677) { + fileIds = other677.fileIds; +} +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other678) { + fileIds = other678.fileIds; + return *this; +} +std::ostream& operator<<(std::ostream& out, const ClearFileMetadataRequest& obj) { + using apache::thrift::to_string; + out << "ClearFileMetadataRequest("; + out << "fileIds=" << to_string(obj.fileIds); + out << ")"; + return out; +} + + GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() { } @@ -14409,14 +15479,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t 
_size609; - ::apache::thrift::protocol::TType _etype612; - xfer += iprot->readListBegin(_etype612, _size609); - this->functions.resize(_size609); - uint32_t _i613; - for (_i613 = 0; _i613 < _size609; ++_i613) + uint32_t _size679; + ::apache::thrift::protocol::TType _etype682; + xfer += iprot->readListBegin(_etype682, _size679); + this->functions.resize(_size679); + uint32_t _i683; + for (_i683 = 0; _i683 < _size679; ++_i683) { - xfer += this->functions[_i613].read(iprot); + xfer += this->functions[_i683].read(iprot); } xfer += iprot->readListEnd(); } @@ -14446,10 +15516,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter614; - for (_iter614 = this->functions.begin(); _iter614 != this->functions.end(); ++_iter614) + std::vector ::const_iterator _iter684; + for (_iter684 = this->functions.begin(); _iter684 != this->functions.end(); ++_iter684) { - xfer += (*_iter614).write(oprot); + xfer += (*_iter684).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14467,13 +15537,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other615) { - functions = other615.functions; - __isset = other615.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other685) { + functions = other685.functions; + __isset = other685.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other616) { - functions = other616.functions; - __isset = other616.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other686) { + functions = other686.functions; + __isset = other686.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const GetAllFunctionsResponse& obj) { @@ -14557,13 +15627,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other617) : TException() { - message = other617.message; - __isset = other617.__isset; +MetaException::MetaException(const MetaException& other687) : TException() { + message = other687.message; + __isset = other687.__isset; } -MetaException& MetaException::operator=(const MetaException& other618) { - message = other618.message; - __isset = other618.__isset; +MetaException& MetaException::operator=(const MetaException& other688) { + message = other688.message; + __isset = other688.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const MetaException& obj) { @@ -14647,13 +15717,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other619) : TException() { - message = other619.message; - __isset = other619.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other689) : TException() { + message = other689.message; + __isset = other689.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other620) { - message = other620.message; - __isset = other620.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other690) { + message = 
other690.message; + __isset = other690.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const UnknownTableException& obj) { @@ -14737,13 +15807,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other621) : TException() { - message = other621.message; - __isset = other621.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other691) : TException() { + message = other691.message; + __isset = other691.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other622) { - message = other622.message; - __isset = other622.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other692) { + message = other692.message; + __isset = other692.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const UnknownDBException& obj) { @@ -14827,13 +15897,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other623) : TException() { - message = other623.message; - __isset = other623.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other693) : TException() { + message = other693.message; + __isset = other693.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other624) { - message = other624.message; - __isset = other624.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other694) { + message = other694.message; + __isset = other694.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const AlreadyExistsException& obj) { @@ -14917,13 +15987,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other625) : TException() { - message = other625.message; - __isset = other625.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other695) : TException() { + message = other695.message; + __isset = other695.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other626) { - message = other626.message; - __isset = other626.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other696) { + message = other696.message; + __isset = other696.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const InvalidPartitionException& obj) { @@ -15007,13 +16077,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other627) : TException() { - message = other627.message; - __isset = other627.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other697) : TException() { + message = other697.message; + __isset = other697.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other628) { - message = other628.message; - __isset = other628.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other698) { + message = other698.message; + __isset = 
other698.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const UnknownPartitionException& obj) { @@ -15097,13 +16167,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other629) : TException() { - message = other629.message; - __isset = other629.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other699) : TException() { + message = other699.message; + __isset = other699.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other630) { - message = other630.message; - __isset = other630.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other700) { + message = other700.message; + __isset = other700.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const InvalidObjectException& obj) { @@ -15187,13 +16257,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other631) : TException() { - message = other631.message; - __isset = other631.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other701) : TException() { + message = other701.message; + __isset = other701.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other632) { - message = other632.message; - __isset = other632.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other702) { + message = other702.message; + __isset = other702.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const NoSuchObjectException& obj) { @@ -15277,13 +16347,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other633) : TException() { - message = other633.message; - __isset = other633.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other703) : TException() { + message = other703.message; + __isset = other703.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other634) { - message = other634.message; - __isset = other634.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other704) { + message = other704.message; + __isset = other704.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const IndexAlreadyExistsException& obj) { @@ -15367,13 +16437,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other635) : TException() { - message = other635.message; - __isset = other635.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other705) : TException() { + message = other705.message; + __isset = other705.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other636) { - message = other636.message; - __isset = other636.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other706) { 
+ message = other706.message; + __isset = other706.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const InvalidOperationException& obj) { @@ -15457,13 +16527,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other637) : TException() { - message = other637.message; - __isset = other637.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other707) : TException() { + message = other707.message; + __isset = other707.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other638) { - message = other638.message; - __isset = other638.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other708) { + message = other708.message; + __isset = other708.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const ConfigValSecurityException& obj) { @@ -15547,13 +16617,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other639) : TException() { - message = other639.message; - __isset = other639.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other709) : TException() { + message = other709.message; + __isset = other709.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other640) { - message = other640.message; - __isset = other640.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other710) { + message = other710.message; + __isset = other710.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const InvalidInputException& obj) { @@ -15637,13 +16707,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other641) : TException() { - message = other641.message; - __isset = other641.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other711) : TException() { + message = other711.message; + __isset = other711.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other642) { - message = other642.message; - __isset = other642.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other712) { + message = other712.message; + __isset = other712.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const NoSuchTxnException& obj) { @@ -15727,13 +16797,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other643) : TException() { - message = other643.message; - __isset = other643.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other713) : TException() { + message = other713.message; + __isset = other713.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other644) { - message = other644.message; - __isset = other644.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other714) { + message = other714.message; + __isset = other714.__isset; return *this; } std::ostream& operator<<(std::ostream& 
out, const TxnAbortedException& obj) { @@ -15817,13 +16887,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other645) : TException() { - message = other645.message; - __isset = other645.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other715) : TException() { + message = other715.message; + __isset = other715.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other646) { - message = other646.message; - __isset = other646.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other716) { + message = other716.message; + __isset = other716.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const TxnOpenException& obj) { @@ -15907,13 +16977,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other647) : TException() { - message = other647.message; - __isset = other647.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other717) : TException() { + message = other717.message; + __isset = other717.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other648) { - message = other648.message; - __isset = other648.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other718) { + message = other718.message; + __isset = other718.__isset; return *this; } std::ostream& operator<<(std::ostream& out, const NoSuchLockException& obj) { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 36110e6..e072866 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -321,6 +321,24 @@ class FireEventRequest; class FireEventResponse; +class MetadataPpdResult; + +class GetFileMetadataByExprResult; + +class GetFileMetadataByExprRequest; + +class GetFileMetadataResult; + +class GetFileMetadataRequest; + +class PutFileMetadataResult; + +class PutFileMetadataRequest; + +class ClearFileMetadataResult; + +class ClearFileMetadataRequest; + class GetAllFunctionsResponse; class MetaException; @@ -5401,6 +5419,359 @@ class FireEventResponse { void swap(FireEventResponse &a, FireEventResponse &b); + +class MetadataPpdResult { + public: + + static const char* ascii_fingerprint; // = "07A9615F837F7D0A952B595DD3020972"; + static const uint8_t binary_fingerprint[16]; // = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72}; + + MetadataPpdResult(const MetadataPpdResult&); + MetadataPpdResult& operator=(const MetadataPpdResult&); + MetadataPpdResult() : metadata(), includeBitset() { + } + + virtual ~MetadataPpdResult() throw(); + std::string metadata; + std::string includeBitset; + + void __set_metadata(const std::string& val); + + void __set_includeBitset(const std::string& val); + + bool operator == (const MetadataPpdResult & rhs) const + { + if (!(metadata == rhs.metadata)) + return false; + if (!(includeBitset == rhs.includeBitset)) + return false; + return true; + } + bool operator != (const MetadataPpdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const MetadataPpdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; 
+ + friend std::ostream& operator<<(std::ostream& out, const MetadataPpdResult& obj); +}; + +void swap(MetadataPpdResult &a, MetadataPpdResult &b); + + +class GetFileMetadataByExprResult { + public: + + static const char* ascii_fingerprint; // = "2B0C1B8D7599529A5797481BE308375D"; + static const uint8_t binary_fingerprint[16]; // = {0x2B,0x0C,0x1B,0x8D,0x75,0x99,0x52,0x9A,0x57,0x97,0x48,0x1B,0xE3,0x08,0x37,0x5D}; + + GetFileMetadataByExprResult(const GetFileMetadataByExprResult&); + GetFileMetadataByExprResult& operator=(const GetFileMetadataByExprResult&); + GetFileMetadataByExprResult() : isSupported(0) { + } + + virtual ~GetFileMetadataByExprResult() throw(); + std::map metadata; + bool isSupported; + std::vector unknownFileIds; + + void __set_metadata(const std::map & val); + + void __set_isSupported(const bool val); + + void __set_unknownFileIds(const std::vector & val); + + bool operator == (const GetFileMetadataByExprResult & rhs) const + { + if (!(metadata == rhs.metadata)) + return false; + if (!(isSupported == rhs.isSupported)) + return false; + if (!(unknownFileIds == rhs.unknownFileIds)) + return false; + return true; + } + bool operator != (const GetFileMetadataByExprResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetFileMetadataByExprResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const GetFileMetadataByExprResult& obj); +}; + +void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b); + + +class GetFileMetadataByExprRequest { + public: + + static const char* ascii_fingerprint; // = "925353917FC0AF87976A2338011F5A31"; + static const uint8_t binary_fingerprint[16]; // = {0x92,0x53,0x53,0x91,0x7F,0xC0,0xAF,0x87,0x97,0x6A,0x23,0x38,0x01,0x1F,0x5A,0x31}; + + GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest&); + GetFileMetadataByExprRequest& operator=(const GetFileMetadataByExprRequest&); + GetFileMetadataByExprRequest() : expr() { + } + + virtual ~GetFileMetadataByExprRequest() throw(); + std::vector fileIds; + std::string expr; + + void __set_fileIds(const std::vector & val); + + void __set_expr(const std::string& val); + + bool operator == (const GetFileMetadataByExprRequest & rhs) const + { + if (!(fileIds == rhs.fileIds)) + return false; + if (!(expr == rhs.expr)) + return false; + return true; + } + bool operator != (const GetFileMetadataByExprRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetFileMetadataByExprRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const GetFileMetadataByExprRequest& obj); +}; + +void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b); + + +class GetFileMetadataResult { + public: + + static const char* ascii_fingerprint; // = "D18BCBD4BA945E7F6500F5CD95205706"; + static const uint8_t binary_fingerprint[16]; // = {0xD1,0x8B,0xCB,0xD4,0xBA,0x94,0x5E,0x7F,0x65,0x00,0xF5,0xCD,0x95,0x20,0x57,0x06}; + + GetFileMetadataResult(const GetFileMetadataResult&); + GetFileMetadataResult& operator=(const GetFileMetadataResult&); + GetFileMetadataResult() : isSupported(0) { + } + + virtual ~GetFileMetadataResult() throw(); + std::map metadata; + bool isSupported; + + void __set_metadata(const std::map & val); + + void 
__set_isSupported(const bool val); + + bool operator == (const GetFileMetadataResult & rhs) const + { + if (!(metadata == rhs.metadata)) + return false; + if (!(isSupported == rhs.isSupported)) + return false; + return true; + } + bool operator != (const GetFileMetadataResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetFileMetadataResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const GetFileMetadataResult& obj); +}; + +void swap(GetFileMetadataResult &a, GetFileMetadataResult &b); + + +class GetFileMetadataRequest { + public: + + static const char* ascii_fingerprint; // = "E49D7D1A9013CC81CD0F69D631EF82E4"; + static const uint8_t binary_fingerprint[16]; // = {0xE4,0x9D,0x7D,0x1A,0x90,0x13,0xCC,0x81,0xCD,0x0F,0x69,0xD6,0x31,0xEF,0x82,0xE4}; + + GetFileMetadataRequest(const GetFileMetadataRequest&); + GetFileMetadataRequest& operator=(const GetFileMetadataRequest&); + GetFileMetadataRequest() { + } + + virtual ~GetFileMetadataRequest() throw(); + std::vector fileIds; + + void __set_fileIds(const std::vector & val); + + bool operator == (const GetFileMetadataRequest & rhs) const + { + if (!(fileIds == rhs.fileIds)) + return false; + return true; + } + bool operator != (const GetFileMetadataRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetFileMetadataRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const GetFileMetadataRequest& obj); +}; + +void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b); + + +class PutFileMetadataResult { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + PutFileMetadataResult(const PutFileMetadataResult&); + PutFileMetadataResult& operator=(const PutFileMetadataResult&); + PutFileMetadataResult() { + } + + virtual ~PutFileMetadataResult() throw(); + + bool operator == (const PutFileMetadataResult & /* rhs */) const + { + return true; + } + bool operator != (const PutFileMetadataResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const PutFileMetadataResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const PutFileMetadataResult& obj); +}; + +void swap(PutFileMetadataResult &a, PutFileMetadataResult &b); + + +class PutFileMetadataRequest { + public: + + static const char* ascii_fingerprint; // = "D64A208A8BCFCE146F4E2CB2176A807C"; + static const uint8_t binary_fingerprint[16]; // = {0xD6,0x4A,0x20,0x8A,0x8B,0xCF,0xCE,0x14,0x6F,0x4E,0x2C,0xB2,0x17,0x6A,0x80,0x7C}; + + PutFileMetadataRequest(const PutFileMetadataRequest&); + PutFileMetadataRequest& operator=(const PutFileMetadataRequest&); + PutFileMetadataRequest() { + } + + virtual ~PutFileMetadataRequest() throw(); + std::vector fileIds; + std::vector metadata; + + void __set_fileIds(const std::vector & val); + + void __set_metadata(const std::vector & val); + + bool operator == (const PutFileMetadataRequest & rhs) const + { + if (!(fileIds == rhs.fileIds)) + return false; + if 
(!(metadata == rhs.metadata)) + return false; + return true; + } + bool operator != (const PutFileMetadataRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const PutFileMetadataRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const PutFileMetadataRequest& obj); +}; + +void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b); + + +class ClearFileMetadataResult { + public: + + static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B"; + static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B}; + + ClearFileMetadataResult(const ClearFileMetadataResult&); + ClearFileMetadataResult& operator=(const ClearFileMetadataResult&); + ClearFileMetadataResult() { + } + + virtual ~ClearFileMetadataResult() throw(); + + bool operator == (const ClearFileMetadataResult & /* rhs */) const + { + return true; + } + bool operator != (const ClearFileMetadataResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ClearFileMetadataResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ClearFileMetadataResult& obj); +}; + +void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b); + + +class ClearFileMetadataRequest { + public: + + static const char* ascii_fingerprint; // = "E49D7D1A9013CC81CD0F69D631EF82E4"; + static const uint8_t binary_fingerprint[16]; // = {0xE4,0x9D,0x7D,0x1A,0x90,0x13,0xCC,0x81,0xCD,0x0F,0x69,0xD6,0x31,0xEF,0x82,0xE4}; + + ClearFileMetadataRequest(const ClearFileMetadataRequest&); + ClearFileMetadataRequest& operator=(const ClearFileMetadataRequest&); + ClearFileMetadataRequest() { + } + + virtual ~ClearFileMetadataRequest() throw(); + std::vector fileIds; + + void __set_fileIds(const std::vector & val); + + bool operator == (const ClearFileMetadataRequest & rhs) const + { + if (!(fileIds == rhs.fileIds)) + return false; + return true; + } + bool operator != (const ClearFileMetadataRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ClearFileMetadataRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + friend std::ostream& operator<<(std::ostream& out, const ClearFileMetadataRequest& obj); +}; + +void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b); + typedef struct _GetAllFunctionsResponse__isset { _GetAllFunctionsResponse__isset() : functions(false) {} bool functions :1; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java index 37e5bf1..73e0ffd 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") 
public class AbortTxnRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 9c78c49..8652d47 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class AddDynamicPartitions implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddDynamicPartitions"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index fcfaaf3..dde146d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class AddPartitionsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index 9022019..922aa42 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class AddPartitionsResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index 917cec0..9dbc5c5 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class AggrStats implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AggrStats"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java index d7a317b..2290762 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class AlreadyExistsException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExistsException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java index 00b312d..32b8916 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class BinaryColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BinaryColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java index a0f3ab8..c019753 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class BooleanColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC 
= new org.apache.thrift.protocol.TStruct("BooleanColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java index 82e3031..1efa060 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class CheckLockRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckLockRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java new file mode 100644 index 0000000..04408a6 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -0,0 +1,438 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class ClearFileMetadataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClearFileMetadataRequest"); + + private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ClearFileMetadataRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ClearFileMetadataRequestTupleSchemeFactory()); + } + + private List fileIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FILE_IDS((short)1, "fileIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FILE_IDS + return FILE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClearFileMetadataRequest.class, metaDataMap); + } + + public ClearFileMetadataRequest() { + } + + public ClearFileMetadataRequest( + List fileIds) + { + this(); + this.fileIds = fileIds; + } + + /** + * Performs a deep copy on other. + */ + public ClearFileMetadataRequest(ClearFileMetadataRequest other) { + if (other.isSetFileIds()) { + List __this__fileIds = new ArrayList(other.fileIds); + this.fileIds = __this__fileIds; + } + } + + public ClearFileMetadataRequest deepCopy() { + return new ClearFileMetadataRequest(this); + } + + @Override + public void clear() { + this.fileIds = null; + } + + public int getFileIdsSize() { + return (this.fileIds == null) ? 0 : this.fileIds.size(); + } + + public java.util.Iterator getFileIdsIterator() { + return (this.fileIds == null) ? 
null : this.fileIds.iterator(); + } + + public void addToFileIds(long elem) { + if (this.fileIds == null) { + this.fileIds = new ArrayList(); + } + this.fileIds.add(elem); + } + + public List getFileIds() { + return this.fileIds; + } + + public void setFileIds(List fileIds) { + this.fileIds = fileIds; + } + + public void unsetFileIds() { + this.fileIds = null; + } + + /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */ + public boolean isSetFileIds() { + return this.fileIds != null; + } + + public void setFileIdsIsSet(boolean value) { + if (!value) { + this.fileIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FILE_IDS: + if (value == null) { + unsetFileIds(); + } else { + setFileIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FILE_IDS: + return getFileIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FILE_IDS: + return isSetFileIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ClearFileMetadataRequest) + return this.equals((ClearFileMetadataRequest)that); + return false; + } + + public boolean equals(ClearFileMetadataRequest that) { + if (that == null) + return false; + + boolean this_present_fileIds = true && this.isSetFileIds(); + boolean that_present_fileIds = true && that.isSetFileIds(); + if (this_present_fileIds || that_present_fileIds) { + if (!(this_present_fileIds && that_present_fileIds)) + return false; + if (!this.fileIds.equals(that.fileIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fileIds = true && (isSetFileIds()); + list.add(present_fileIds); + if (present_fileIds) + list.add(fileIds); + + return list.hashCode(); + } + + @Override + public int compareTo(ClearFileMetadataRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFileIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ClearFileMetadataRequest("); + boolean first = true; + + sb.append("fileIds:"); + if (this.fileIds == null) { + sb.append("null"); + } else { + sb.append(this.fileIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { 
+ // check for required fields + if (!isSetFileIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ClearFileMetadataRequestStandardSchemeFactory implements SchemeFactory { + public ClearFileMetadataRequestStandardScheme getScheme() { + return new ClearFileMetadataRequestStandardScheme(); + } + } + + private static class ClearFileMetadataRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FILE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list584 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list584.size); + long _elem585; + for (int _i586 = 0; _i586 < _list584.size; ++_i586) + { + _elem585 = iprot.readI64(); + struct.fileIds.add(_elem585); + } + iprot.readListEnd(); + } + struct.setFileIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fileIds != null) { + oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); + for (long _iter587 : struct.fileIds) + { + oprot.writeI64(_iter587); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ClearFileMetadataRequestTupleSchemeFactory implements SchemeFactory { + public ClearFileMetadataRequestTupleScheme getScheme() { + return new ClearFileMetadataRequestTupleScheme(); + } + } + + private static class ClearFileMetadataRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.fileIds.size()); + for (long _iter588 : struct.fileIds) + { + oprot.writeI64(_iter588); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws 
org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list589 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list589.size); + long _elem590; + for (int _i591 = 0; _i591 < _list589.size; ++_i591) + { + _elem590 = iprot.readI64(); + struct.fileIds.add(_elem590); + } + } + struct.setFileIdsIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java new file mode 100644 index 0000000..4d9dfb8 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class ClearFileMetadataResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClearFileMetadataResult"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ClearFileMetadataResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ClearFileMetadataResultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClearFileMetadataResult.class, metaDataMap); + } + + public ClearFileMetadataResult() { + } + + /** + * Performs a deep copy on other. + */ + public ClearFileMetadataResult(ClearFileMetadataResult other) { + } + + public ClearFileMetadataResult deepCopy() { + return new ClearFileMetadataResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ClearFileMetadataResult) + return this.equals((ClearFileMetadataResult)that); + return false; + } + + public boolean equals(ClearFileMetadataResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(ClearFileMetadataResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ClearFileMetadataResult("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ClearFileMetadataResultStandardSchemeFactory implements SchemeFactory { + public ClearFileMetadataResultStandardScheme getScheme() { + return new ClearFileMetadataResultStandardScheme(); + } + } + + private static class ClearFileMetadataResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ClearFileMetadataResultTupleSchemeFactory implements SchemeFactory { + public ClearFileMetadataResultTupleScheme getScheme() { + return new ClearFileMetadataResultTupleScheme(); + } + } + + private static class ClearFileMetadataResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index 510dace..55cfab0 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ColumnStatistics implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatistics"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java index cf967b6..ad72c3d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ColumnStatisticsDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsDesc"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java index 3c2b123..4fbe506 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ColumnStatisticsObj implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsObj"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java index 5e8cd04..93ff732 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class CommitTxnRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CommitTxnRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index e9088e0..688706e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class CompactionRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("CompactionRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java index 000670a..e92b6d6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ConfigValSecurityException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java index d55d874..a3acf64 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class CurrentNotificationEventId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CurrentNotificationEventId"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 56b7281..35c63b6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Database implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Database"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java index 0b406d6..b762895 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", 
"unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Date implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Date"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java index 7a3d4ed..e669ee8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DateColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DateColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java index 9215ce9..e54c906 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Decimal implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Decimal"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java index e64ca36..74bbe33 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DecimalColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DecimalColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java index 2509ed5..48a742f 100644 --- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DoubleColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DoubleColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java index 5e3a2d1..2552cbd 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DropPartitionsExpr implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsExpr"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java index 24536ba..f6c873a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DropPartitionsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index c139e65..697e1b8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class DropPartitionsResult implements org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsResult"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index 6accb8d..9c80329 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class EnvironmentContext implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("EnvironmentContext"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java index ba69622..de53201 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class FieldSchema implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FieldSchema"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index f3d439c..04b6f0a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class FireEventRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java index d95ae06..c3234f2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; 
@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class FireEventResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index 50eff73..e6b847d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Function implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Function"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 0a9e27b..a98db18 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetAllFunctionsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetAllFunctionsResponse"); @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); - struct.functions = new ArrayList(_list524.size); - Function _elem525; - for (int _i526 = 0; _i526 < _list524.size; ++_i526) + org.apache.thrift.protocol.TList _list592 = iprot.readListBegin(); + struct.functions = new ArrayList(_list592.size); + Function _elem593; + for (int _i594 = 0; _i594 < _list592.size; ++_i594) { - _elem525 = new Function(); - _elem525.read(iprot); - struct.functions.add(_elem525); + _elem593 = new Function(); + _elem593.read(iprot); + struct.functions.add(_elem593); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter527 : struct.functions) + for (Function _iter595 : struct.functions) { - 
_iter527.write(oprot); + _iter595.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter528 : struct.functions) + for (Function _iter596 : struct.functions) { - _iter528.write(oprot); + _iter596.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list529.size); - Function _elem530; - for (int _i531 = 0; _i531 < _list529.size; ++_i531) + org.apache.thrift.protocol.TList _list597 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list597.size); + Function _elem598; + for (int _i599 = 0; _i599 < _list597.size; ++_i599) { - _elem530 = new Function(); - _elem530.read(iprot); - struct.functions.add(_elem530); + _elem598 = new Function(); + _elem598.read(iprot); + struct.functions.add(_elem598); } } struct.setFunctionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java new file mode 100644 index 0000000..3d69606 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -0,0 +1,548 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class GetFileMetadataByExprRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataByExprRequest"); + + private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", 
org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetFileMetadataByExprRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetFileMetadataByExprRequestTupleSchemeFactory()); + } + + private List fileIds; // required + private ByteBuffer expr; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FILE_IDS((short)1, "fileIds"), + EXPR((short)2, "expr"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FILE_IDS + return FILE_IDS; + case 2: // EXPR + return EXPR; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.EXPR, new org.apache.thrift.meta_data.FieldMetaData("expr", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataByExprRequest.class, metaDataMap); + } + + public GetFileMetadataByExprRequest() { + } + + public GetFileMetadataByExprRequest( + List fileIds, + ByteBuffer expr) + { + this(); + this.fileIds = fileIds; + this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr); + } + + /** + * Performs a deep copy on other. 
+ */ + public GetFileMetadataByExprRequest(GetFileMetadataByExprRequest other) { + if (other.isSetFileIds()) { + List __this__fileIds = new ArrayList(other.fileIds); + this.fileIds = __this__fileIds; + } + if (other.isSetExpr()) { + this.expr = org.apache.thrift.TBaseHelper.copyBinary(other.expr); + } + } + + public GetFileMetadataByExprRequest deepCopy() { + return new GetFileMetadataByExprRequest(this); + } + + @Override + public void clear() { + this.fileIds = null; + this.expr = null; + } + + public int getFileIdsSize() { + return (this.fileIds == null) ? 0 : this.fileIds.size(); + } + + public java.util.Iterator getFileIdsIterator() { + return (this.fileIds == null) ? null : this.fileIds.iterator(); + } + + public void addToFileIds(long elem) { + if (this.fileIds == null) { + this.fileIds = new ArrayList(); + } + this.fileIds.add(elem); + } + + public List getFileIds() { + return this.fileIds; + } + + public void setFileIds(List fileIds) { + this.fileIds = fileIds; + } + + public void unsetFileIds() { + this.fileIds = null; + } + + /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */ + public boolean isSetFileIds() { + return this.fileIds != null; + } + + public void setFileIdsIsSet(boolean value) { + if (!value) { + this.fileIds = null; + } + } + + public byte[] getExpr() { + setExpr(org.apache.thrift.TBaseHelper.rightSize(expr)); + return expr == null ? null : expr.array(); + } + + public ByteBuffer bufferForExpr() { + return org.apache.thrift.TBaseHelper.copyBinary(expr); + } + + public void setExpr(byte[] expr) { + this.expr = expr == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(expr, expr.length)); + } + + public void setExpr(ByteBuffer expr) { + this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr); + } + + public void unsetExpr() { + this.expr = null; + } + + /** Returns true if field expr is set (has been assigned a value) and false otherwise */ + public boolean isSetExpr() { + return this.expr != null; + } + + public void setExprIsSet(boolean value) { + if (!value) { + this.expr = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FILE_IDS: + if (value == null) { + unsetFileIds(); + } else { + setFileIds((List)value); + } + break; + + case EXPR: + if (value == null) { + unsetExpr(); + } else { + setExpr((ByteBuffer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FILE_IDS: + return getFileIds(); + + case EXPR: + return getExpr(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FILE_IDS: + return isSetFileIds(); + case EXPR: + return isSetExpr(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetFileMetadataByExprRequest) + return this.equals((GetFileMetadataByExprRequest)that); + return false; + } + + public boolean equals(GetFileMetadataByExprRequest that) { + if (that == null) + return false; + + boolean this_present_fileIds = true && this.isSetFileIds(); + boolean that_present_fileIds = true && that.isSetFileIds(); + if (this_present_fileIds || that_present_fileIds) { + if (!(this_present_fileIds && that_present_fileIds)) + return false; + if 
(!this.fileIds.equals(that.fileIds)) + return false; + } + + boolean this_present_expr = true && this.isSetExpr(); + boolean that_present_expr = true && that.isSetExpr(); + if (this_present_expr || that_present_expr) { + if (!(this_present_expr && that_present_expr)) + return false; + if (!this.expr.equals(that.expr)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fileIds = true && (isSetFileIds()); + list.add(present_fileIds); + if (present_fileIds) + list.add(fileIds); + + boolean present_expr = true && (isSetExpr()); + list.add(present_expr); + if (present_expr) + list.add(expr); + + return list.hashCode(); + } + + @Override + public int compareTo(GetFileMetadataByExprRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFileIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetExpr()).compareTo(other.isSetExpr()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetExpr()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expr, other.expr); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetFileMetadataByExprRequest("); + boolean first = true; + + sb.append("fileIds:"); + if (this.fileIds == null) { + sb.append("null"); + } else { + sb.append(this.fileIds); + } + first = false; + if (!first) sb.append(", "); + sb.append("expr:"); + if (this.expr == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.expr, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetFileIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString()); + } + + if (!isSetExpr()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'expr' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetFileMetadataByExprRequestStandardSchemeFactory implements SchemeFactory { + public GetFileMetadataByExprRequestStandardScheme getScheme() { + return new GetFileMetadataByExprRequestStandardScheme(); + } + } + + private static class GetFileMetadataByExprRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FILE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list542 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list542.size); + long _elem543; + for (int _i544 = 0; _i544 < _list542.size; ++_i544) + { + _elem543 = iprot.readI64(); + struct.fileIds.add(_elem543); + } + iprot.readListEnd(); + } + struct.setFileIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // EXPR + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.expr = iprot.readBinary(); + struct.setExprIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fileIds != null) { + oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); + for (long _iter545 : struct.fileIds) + { + oprot.writeI64(_iter545); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.expr != null) { + oprot.writeFieldBegin(EXPR_FIELD_DESC); + oprot.writeBinary(struct.expr); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetFileMetadataByExprRequestTupleSchemeFactory implements SchemeFactory { + public GetFileMetadataByExprRequestTupleScheme getScheme() { + return new GetFileMetadataByExprRequestTupleScheme(); + } + } + + private static class GetFileMetadataByExprRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { + TTupleProtocol 
oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.fileIds.size()); + for (long _iter546 : struct.fileIds) + { + oprot.writeI64(_iter546); + } + } + oprot.writeBinary(struct.expr); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list547 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list547.size); + long _elem548; + for (int _i549 = 0; _i549 < _list547.size; ++_i549) + { + _elem548 = iprot.readI64(); + struct.fileIds.add(_elem548); + } + } + struct.setFileIdsIsSet(true); + struct.expr = iprot.readBinary(); + struct.setExprIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java new file mode 100644 index 0000000..3ac9921 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -0,0 +1,703 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class GetFileMetadataByExprResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataByExprResult"); + + private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField IS_SUPPORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("isSupported", org.apache.thrift.protocol.TType.BOOL, (short)2); + private static final org.apache.thrift.protocol.TField UNKNOWN_FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("unknownFileIds", org.apache.thrift.protocol.TType.LIST, (short)3); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetFileMetadataByExprResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, 
new GetFileMetadataByExprResultTupleSchemeFactory()); + } + + private Map metadata; // required + private boolean isSupported; // required + private List unknownFileIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + METADATA((short)1, "metadata"), + IS_SUPPORTED((short)2, "isSupported"), + UNKNOWN_FILE_IDS((short)3, "unknownFileIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // METADATA + return METADATA; + case 2: // IS_SUPPORTED + return IS_SUPPORTED; + case 3: // UNKNOWN_FILE_IDS + return UNKNOWN_FILE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ISSUPPORTED_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64), + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetadataPpdResult.class)))); + tmpMap.put(_Fields.IS_SUPPORTED, new org.apache.thrift.meta_data.FieldMetaData("isSupported", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.UNKNOWN_FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("unknownFileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataByExprResult.class, metaDataMap); + } + + public GetFileMetadataByExprResult() { + } + + public GetFileMetadataByExprResult( + Map metadata, + boolean isSupported, + List unknownFileIds) + { + this(); + this.metadata = metadata; + 
this.isSupported = isSupported; + setIsSupportedIsSet(true); + this.unknownFileIds = unknownFileIds; + } + + /** + * Performs a deep copy on other. + */ + public GetFileMetadataByExprResult(GetFileMetadataByExprResult other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetMetadata()) { + Map __this__metadata = new HashMap(other.metadata.size()); + for (Map.Entry other_element : other.metadata.entrySet()) { + + Long other_element_key = other_element.getKey(); + MetadataPpdResult other_element_value = other_element.getValue(); + + Long __this__metadata_copy_key = other_element_key; + + MetadataPpdResult __this__metadata_copy_value = new MetadataPpdResult(other_element_value); + + __this__metadata.put(__this__metadata_copy_key, __this__metadata_copy_value); + } + this.metadata = __this__metadata; + } + this.isSupported = other.isSupported; + if (other.isSetUnknownFileIds()) { + List __this__unknownFileIds = new ArrayList(other.unknownFileIds); + this.unknownFileIds = __this__unknownFileIds; + } + } + + public GetFileMetadataByExprResult deepCopy() { + return new GetFileMetadataByExprResult(this); + } + + @Override + public void clear() { + this.metadata = null; + setIsSupportedIsSet(false); + this.isSupported = false; + this.unknownFileIds = null; + } + + public int getMetadataSize() { + return (this.metadata == null) ? 0 : this.metadata.size(); + } + + public void putToMetadata(long key, MetadataPpdResult val) { + if (this.metadata == null) { + this.metadata = new HashMap(); + } + this.metadata.put(key, val); + } + + public Map getMetadata() { + return this.metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public boolean isIsSupported() { + return this.isSupported; + } + + public void setIsSupported(boolean isSupported) { + this.isSupported = isSupported; + setIsSupportedIsSet(true); + } + + public void unsetIsSupported() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID); + } + + /** Returns true if field isSupported is set (has been assigned a value) and false otherwise */ + public boolean isSetIsSupported() { + return EncodingUtils.testBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID); + } + + public void setIsSupportedIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID, value); + } + + public int getUnknownFileIdsSize() { + return (this.unknownFileIds == null) ? 0 : this.unknownFileIds.size(); + } + + public java.util.Iterator getUnknownFileIdsIterator() { + return (this.unknownFileIds == null) ? 
null : this.unknownFileIds.iterator(); + } + + public void addToUnknownFileIds(long elem) { + if (this.unknownFileIds == null) { + this.unknownFileIds = new ArrayList(); + } + this.unknownFileIds.add(elem); + } + + public List getUnknownFileIds() { + return this.unknownFileIds; + } + + public void setUnknownFileIds(List unknownFileIds) { + this.unknownFileIds = unknownFileIds; + } + + public void unsetUnknownFileIds() { + this.unknownFileIds = null; + } + + /** Returns true if field unknownFileIds is set (has been assigned a value) and false otherwise */ + public boolean isSetUnknownFileIds() { + return this.unknownFileIds != null; + } + + public void setUnknownFileIdsIsSet(boolean value) { + if (!value) { + this.unknownFileIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((Map)value); + } + break; + + case IS_SUPPORTED: + if (value == null) { + unsetIsSupported(); + } else { + setIsSupported((Boolean)value); + } + break; + + case UNKNOWN_FILE_IDS: + if (value == null) { + unsetUnknownFileIds(); + } else { + setUnknownFileIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case METADATA: + return getMetadata(); + + case IS_SUPPORTED: + return Boolean.valueOf(isIsSupported()); + + case UNKNOWN_FILE_IDS: + return getUnknownFileIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case METADATA: + return isSetMetadata(); + case IS_SUPPORTED: + return isSetIsSupported(); + case UNKNOWN_FILE_IDS: + return isSetUnknownFileIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetFileMetadataByExprResult) + return this.equals((GetFileMetadataByExprResult)that); + return false; + } + + public boolean equals(GetFileMetadataByExprResult that) { + if (that == null) + return false; + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + boolean this_present_isSupported = true; + boolean that_present_isSupported = true; + if (this_present_isSupported || that_present_isSupported) { + if (!(this_present_isSupported && that_present_isSupported)) + return false; + if (this.isSupported != that.isSupported) + return false; + } + + boolean this_present_unknownFileIds = true && this.isSetUnknownFileIds(); + boolean that_present_unknownFileIds = true && that.isSetUnknownFileIds(); + if (this_present_unknownFileIds || that_present_unknownFileIds) { + if (!(this_present_unknownFileIds && that_present_unknownFileIds)) + return false; + if (!this.unknownFileIds.equals(that.unknownFileIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_metadata = true && (isSetMetadata()); + list.add(present_metadata); + if (present_metadata) + list.add(metadata); + + boolean present_isSupported = true; + list.add(present_isSupported); + if 
(present_isSupported) + list.add(isSupported); + + boolean present_unknownFileIds = true && (isSetUnknownFileIds()); + list.add(present_unknownFileIds); + if (present_unknownFileIds) + list.add(unknownFileIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetFileMetadataByExprResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMetadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsSupported()).compareTo(other.isSetIsSupported()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsSupported()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isSupported, other.isSupported); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUnknownFileIds()).compareTo(other.isSetUnknownFileIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUnknownFileIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.unknownFileIds, other.unknownFileIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetFileMetadataByExprResult("); + boolean first = true; + + sb.append("metadata:"); + if (this.metadata == null) { + sb.append("null"); + } else { + sb.append(this.metadata); + } + first = false; + if (!first) sb.append(", "); + sb.append("isSupported:"); + sb.append(this.isSupported); + first = false; + if (!first) sb.append(", "); + sb.append("unknownFileIds:"); + if (this.unknownFileIds == null) { + sb.append("null"); + } else { + sb.append(this.unknownFileIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetMetadata()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString()); + } + + if (!isSetIsSupported()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'isSupported' is unset! Struct:" + toString()); + } + + if (!isSetUnknownFileIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'unknownFileIds' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetFileMetadataByExprResultStandardSchemeFactory implements SchemeFactory { + public GetFileMetadataByExprResultStandardScheme getScheme() { + return new GetFileMetadataByExprResultStandardScheme(); + } + } + + private static class GetFileMetadataByExprResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map524 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map524.size); + long _key525; + MetadataPpdResult _val526; + for (int _i527 = 0; _i527 < _map524.size; ++_i527) + { + _key525 = iprot.readI64(); + _val526 = new MetadataPpdResult(); + _val526.read(iprot); + struct.metadata.put(_key525, _val526); + } + iprot.readMapEnd(); + } + struct.setMetadataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // IS_SUPPORTED + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isSupported = iprot.readBool(); + struct.setIsSupportedIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // UNKNOWN_FILE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list528 = iprot.readListBegin(); + struct.unknownFileIds = new ArrayList(_list528.size); + long _elem529; + for (int _i530 = 0; _i530 < _list528.size; ++_i530) + { + _elem529 = iprot.readI64(); + struct.unknownFileIds.add(_elem529); + } + iprot.readListEnd(); + } + struct.setUnknownFileIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.metadata != null) { + oprot.writeFieldBegin(METADATA_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, 
struct.metadata.size())); + for (Map.Entry _iter531 : struct.metadata.entrySet()) + { + oprot.writeI64(_iter531.getKey()); + _iter531.getValue().write(oprot); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(IS_SUPPORTED_FIELD_DESC); + oprot.writeBool(struct.isSupported); + oprot.writeFieldEnd(); + if (struct.unknownFileIds != null) { + oprot.writeFieldBegin(UNKNOWN_FILE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.unknownFileIds.size())); + for (long _iter532 : struct.unknownFileIds) + { + oprot.writeI64(_iter532); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetFileMetadataByExprResultTupleSchemeFactory implements SchemeFactory { + public GetFileMetadataByExprResultTupleScheme getScheme() { + return new GetFileMetadataByExprResultTupleScheme(); + } + } + + private static class GetFileMetadataByExprResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.metadata.size()); + for (Map.Entry _iter533 : struct.metadata.entrySet()) + { + oprot.writeI64(_iter533.getKey()); + _iter533.getValue().write(oprot); + } + } + oprot.writeBool(struct.isSupported); + { + oprot.writeI32(struct.unknownFileIds.size()); + for (long _iter534 : struct.unknownFileIds) + { + oprot.writeI64(_iter534); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TMap _map535 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map535.size); + long _key536; + MetadataPpdResult _val537; + for (int _i538 = 0; _i538 < _map535.size; ++_i538) + { + _key536 = iprot.readI64(); + _val537 = new MetadataPpdResult(); + _val537.read(iprot); + struct.metadata.put(_key536, _val537); + } + } + struct.setMetadataIsSet(true); + struct.isSupported = iprot.readBool(); + struct.setIsSupportedIsSet(true); + { + org.apache.thrift.protocol.TList _list539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.unknownFileIds = new ArrayList(_list539.size); + long _elem540; + for (int _i541 = 0; _i541 < _list539.size; ++_i541) + { + _elem540 = iprot.readI64(); + struct.unknownFileIds.add(_elem540); + } + } + struct.setUnknownFileIdsIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java new file mode 100644 index 0000000..e4cd1c4 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -0,0 +1,438 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import 
org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class GetFileMetadataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataRequest"); + + private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetFileMetadataRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetFileMetadataRequestTupleSchemeFactory()); + } + + private List fileIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FILE_IDS((short)1, "fileIds"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FILE_IDS + return FILE_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataRequest.class, metaDataMap); + } + + public GetFileMetadataRequest() { + } + + public GetFileMetadataRequest( + List fileIds) + { + this(); + this.fileIds = fileIds; + } + + /** + * Performs a deep copy on other. + */ + public GetFileMetadataRequest(GetFileMetadataRequest other) { + if (other.isSetFileIds()) { + List __this__fileIds = new ArrayList(other.fileIds); + this.fileIds = __this__fileIds; + } + } + + public GetFileMetadataRequest deepCopy() { + return new GetFileMetadataRequest(this); + } + + @Override + public void clear() { + this.fileIds = null; + } + + public int getFileIdsSize() { + return (this.fileIds == null) ? 0 : this.fileIds.size(); + } + + public java.util.Iterator getFileIdsIterator() { + return (this.fileIds == null) ? 
null : this.fileIds.iterator(); + } + + public void addToFileIds(long elem) { + if (this.fileIds == null) { + this.fileIds = new ArrayList(); + } + this.fileIds.add(elem); + } + + public List getFileIds() { + return this.fileIds; + } + + public void setFileIds(List fileIds) { + this.fileIds = fileIds; + } + + public void unsetFileIds() { + this.fileIds = null; + } + + /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */ + public boolean isSetFileIds() { + return this.fileIds != null; + } + + public void setFileIdsIsSet(boolean value) { + if (!value) { + this.fileIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FILE_IDS: + if (value == null) { + unsetFileIds(); + } else { + setFileIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FILE_IDS: + return getFileIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FILE_IDS: + return isSetFileIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetFileMetadataRequest) + return this.equals((GetFileMetadataRequest)that); + return false; + } + + public boolean equals(GetFileMetadataRequest that) { + if (that == null) + return false; + + boolean this_present_fileIds = true && this.isSetFileIds(); + boolean that_present_fileIds = true && that.isSetFileIds(); + if (this_present_fileIds || that_present_fileIds) { + if (!(this_present_fileIds && that_present_fileIds)) + return false; + if (!this.fileIds.equals(that.fileIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fileIds = true && (isSetFileIds()); + list.add(present_fileIds); + if (present_fileIds) + list.add(fileIds); + + return list.hashCode(); + } + + @Override + public int compareTo(GetFileMetadataRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFileIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetFileMetadataRequest("); + boolean first = true; + + sb.append("fileIds:"); + if (this.fileIds == null) { + sb.append("null"); + } else { + sb.append(this.fileIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check 
for required fields + if (!isSetFileIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetFileMetadataRequestStandardSchemeFactory implements SchemeFactory { + public GetFileMetadataRequestStandardScheme getScheme() { + return new GetFileMetadataRequestStandardScheme(); + } + } + + private static class GetFileMetadataRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FILE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list560 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list560.size); + long _elem561; + for (int _i562 = 0; _i562 < _list560.size; ++_i562) + { + _elem561 = iprot.readI64(); + struct.fileIds.add(_elem561); + } + iprot.readListEnd(); + } + struct.setFileIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fileIds != null) { + oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); + for (long _iter563 : struct.fileIds) + { + oprot.writeI64(_iter563); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetFileMetadataRequestTupleSchemeFactory implements SchemeFactory { + public GetFileMetadataRequestTupleScheme getScheme() { + return new GetFileMetadataRequestTupleScheme(); + } + } + + private static class GetFileMetadataRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.fileIds.size()); + for (long _iter564 : struct.fileIds) + { + oprot.writeI64(_iter564); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { + 
TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list565 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list565.size); + long _elem566; + for (int _i567 = 0; _i567 < _list565.size; ++_i567) + { + _elem566 = iprot.readI64(); + struct.fileIds.add(_elem566); + } + } + struct.setFileIdsIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java new file mode 100644 index 0000000..a7d01e1 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -0,0 +1,540 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class GetFileMetadataResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataResult"); + + private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.MAP, (short)1); + private static final org.apache.thrift.protocol.TField IS_SUPPORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("isSupported", org.apache.thrift.protocol.TType.BOOL, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetFileMetadataResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetFileMetadataResultTupleSchemeFactory()); + } + + private Map metadata; // required + private boolean isSupported; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
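The GetFileMetadataRequest struct added above carries only a required list of file IDs. Below is a minimal round-trip sketch, assuming a standalone test harness: the struct, its constructor, validate(), write() and read() come from this patch, while the harness class name and the sample IDs are illustrative only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

public class FileMetadataRequestRoundTrip {
  public static void main(String[] args) throws TException {
    // fileIds is the struct's only field and is REQUIRED; validate() enforces that.
    GetFileMetadataRequest request = new GetFileMetadataRequest(Arrays.asList(1L, 2L, 3L));
    request.validate();

    // Encode with TCompactProtocol, the same protocol the generated
    // writeObject()/readObject() hooks use for Java serialization.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    request.write(new TCompactProtocol(new TIOStreamTransport(buffer)));

    // Decode into a fresh instance; read() dispatches to the generated StandardScheme.
    GetFileMetadataRequest decoded = new GetFileMetadataRequest();
    decoded.read(new TCompactProtocol(
        new TIOStreamTransport(new ByteArrayInputStream(buffer.toByteArray()))));

    System.out.println(decoded); // GetFileMetadataRequest(fileIds:[1, 2, 3])
  }
}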
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + METADATA((short)1, "metadata"), + IS_SUPPORTED((short)2, "isSupported"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // METADATA + return METADATA; + case 2: // IS_SUPPORTED + return IS_SUPPORTED; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ISSUPPORTED_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); + tmpMap.put(_Fields.IS_SUPPORTED, new org.apache.thrift.meta_data.FieldMetaData("isSupported", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataResult.class, metaDataMap); + } + + public GetFileMetadataResult() { + } + + public GetFileMetadataResult( + Map metadata, + boolean isSupported) + { + this(); + this.metadata = metadata; + this.isSupported = isSupported; + setIsSupportedIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetFileMetadataResult(GetFileMetadataResult other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetMetadata()) { + Map __this__metadata = new HashMap(other.metadata); + this.metadata = __this__metadata; + } + this.isSupported = other.isSupported; + } + + public GetFileMetadataResult deepCopy() { + return new GetFileMetadataResult(this); + } + + @Override + public void clear() { + this.metadata = null; + setIsSupportedIsSet(false); + this.isSupported = false; + } + + public int getMetadataSize() { + return (this.metadata == null) ? 
0 : this.metadata.size(); + } + + public void putToMetadata(long key, ByteBuffer val) { + if (this.metadata == null) { + this.metadata = new HashMap(); + } + this.metadata.put(key, val); + } + + public Map getMetadata() { + return this.metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public boolean isIsSupported() { + return this.isSupported; + } + + public void setIsSupported(boolean isSupported) { + this.isSupported = isSupported; + setIsSupportedIsSet(true); + } + + public void unsetIsSupported() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID); + } + + /** Returns true if field isSupported is set (has been assigned a value) and false otherwise */ + public boolean isSetIsSupported() { + return EncodingUtils.testBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID); + } + + public void setIsSupportedIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((Map)value); + } + break; + + case IS_SUPPORTED: + if (value == null) { + unsetIsSupported(); + } else { + setIsSupported((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case METADATA: + return getMetadata(); + + case IS_SUPPORTED: + return Boolean.valueOf(isIsSupported()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case METADATA: + return isSetMetadata(); + case IS_SUPPORTED: + return isSetIsSupported(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetFileMetadataResult) + return this.equals((GetFileMetadataResult)that); + return false; + } + + public boolean equals(GetFileMetadataResult that) { + if (that == null) + return false; + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + boolean this_present_isSupported = true; + boolean that_present_isSupported = true; + if (this_present_isSupported || that_present_isSupported) { + if (!(this_present_isSupported && that_present_isSupported)) + return false; + if (this.isSupported != that.isSupported) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_metadata = true && (isSetMetadata()); + list.add(present_metadata); + if (present_metadata) + list.add(metadata); + + boolean present_isSupported = true; + list.add(present_isSupported); + if (present_isSupported) + list.add(isSupported); + + return list.hashCode(); 
+ } + + @Override + public int compareTo(GetFileMetadataResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMetadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIsSupported()).compareTo(other.isSetIsSupported()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIsSupported()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isSupported, other.isSupported); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetFileMetadataResult("); + boolean first = true; + + sb.append("metadata:"); + if (this.metadata == null) { + sb.append("null"); + } else { + sb.append(this.metadata); + } + first = false; + if (!first) sb.append(", "); + sb.append("isSupported:"); + sb.append(this.isSupported); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetMetadata()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString()); + } + + if (!isSetIsSupported()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'isSupported' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
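Because GetFileMetadataResult is java.io.Serializable, its private writeObject/readObject hooks delegate to Thrift's compact encoding rather than default Java field serialization. A short sketch of that round trip follows, assuming a standalone harness; the struct and its accessors are from this patch, while the harness class name, sample key and payload are illustrative only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;

public class FileMetadataResultJavaSerialization {
  public static void main(String[] args) throws Exception {
    GetFileMetadataResult result = new GetFileMetadataResult();
    // Both fields are REQUIRED, so set them before serializing (write() calls validate()).
    result.putToMetadata(42L, ByteBuffer.wrap("blob".getBytes(StandardCharsets.UTF_8)));
    result.setIsSupported(true);

    // Plain Java serialization; ObjectOutputStream invokes the private writeObject() hook,
    // which streams the struct through TCompactProtocol.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bytes)) {
      oos.writeObject(result);
    }

    // Deserialization invokes readObject(), which resets __isset_bitfield and Thrift-reads the fields.
    GetFileMetadataResult copy;
    try (ObjectInputStream ois = new ObjectInputStream(
        new ByteArrayInputStream(bytes.toByteArray()))) {
      copy = (GetFileMetadataResult) ois.readObject();
    }

    System.out.println(copy.isIsSupported() && copy.getMetadataSize() == 1); // true
  }
}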
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetFileMetadataResultStandardSchemeFactory implements SchemeFactory { + public GetFileMetadataResultStandardScheme getScheme() { + return new GetFileMetadataResultStandardScheme(); + } + } + + private static class GetFileMetadataResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map550 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map550.size); + long _key551; + ByteBuffer _val552; + for (int _i553 = 0; _i553 < _map550.size; ++_i553) + { + _key551 = iprot.readI64(); + _val552 = iprot.readBinary(); + struct.metadata.put(_key551, _val552); + } + iprot.readMapEnd(); + } + struct.setMetadataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // IS_SUPPORTED + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.isSupported = iprot.readBool(); + struct.setIsSupportedIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.metadata != null) { + oprot.writeFieldBegin(METADATA_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); + for (Map.Entry _iter554 : struct.metadata.entrySet()) + { + oprot.writeI64(_iter554.getKey()); + oprot.writeBinary(_iter554.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(IS_SUPPORTED_FIELD_DESC); + oprot.writeBool(struct.isSupported); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetFileMetadataResultTupleSchemeFactory implements SchemeFactory { + public GetFileMetadataResultTupleScheme getScheme() { + return new GetFileMetadataResultTupleScheme(); + } + } + + private static class GetFileMetadataResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.metadata.size()); + for (Map.Entry _iter555 : struct.metadata.entrySet()) + { + oprot.writeI64(_iter555.getKey()); + oprot.writeBinary(_iter555.getValue()); + } + } + oprot.writeBool(struct.isSupported); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws 
org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TMap _map556 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map556.size); + long _key557; + ByteBuffer _val558; + for (int _i559 = 0; _i559 < _map556.size; ++_i559) + { + _key557 = iprot.readI64(); + _val558 = iprot.readBinary(); + struct.metadata.put(_key557, _val558); + } + } + struct.setMetadataIsSet(true); + struct.isSupported = iprot.readBool(); + struct.setIsSupportedIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index 61dea16..ad1af91 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetOpenTxnsInfoResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsInfoResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index bd21de5..fb6d841 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetOpenTxnsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java index 45e0cc1..6111cb9 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetPrincipalsInRoleRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java index 805b898..abe22af 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetPrincipalsInRoleResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java index c2c7259..42154b3 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetRoleGrantsForPrincipalRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java index 7156665..2df6f63 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GetRoleGrantsForPrincipalResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java index 94e5a5e..10282e7 100644 --- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GrantRevokePrivilegeRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java index 1949aed..6a123e9 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GrantRevokePrivilegeResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java index ac3527a..8355cee 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class GrantRevokeRoleRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java index 81ae31f..f360916 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = 
"2015-8-17") public class GrantRevokeRoleResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java index 4a9ba7f..44c7958 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HeartbeatRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java index dd6ed17..bae4cda 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HeartbeatTxnRangeRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 11bd82b..54b0e93 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HeartbeatTxnRangeResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java index 931b4e1..009bd55 100644 --- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HiveObjectPrivilege implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectPrivilege"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java index 2573cea..9d581d2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HiveObjectRef implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectRef"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java index 81227c3..98d0f22 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Index implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Index"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java index 50517ed..9c03813 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class IndexAlreadyExistsException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IndexAlreadyExistsException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 2e14cf9..3dc80f1 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InsertEventRequestData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InsertEventRequestData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java index 87e2f6b..e169271 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InvalidInputException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidInputException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java index 14dd7d8..47c16e8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InvalidObjectException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidObjectException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java index 2e3f1e3..969fd8a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InvalidOperationException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidOperationException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java index 1d9b565..8a6db46 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InvalidPartitionException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidPartitionException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java index 319f8bb..dfccfb5 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class LockComponent implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockComponent"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index 6894bfa..f3596db 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class LockRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("LockRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java index 08acaeb..168e8bc 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class LockResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java index 93d2386..96ed366 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class LongColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LongColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java index 883a1d4..04a942a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class MetaException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetaException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java new file mode 100644 index 0000000..cfae60d --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java @@ -0,0 +1,508 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import 
org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class MetadataPpdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetadataPpdResult"); + + private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField INCLUDE_BITSET_FIELD_DESC = new org.apache.thrift.protocol.TField("includeBitset", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new MetadataPpdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new MetadataPpdResultTupleSchemeFactory()); + } + + private ByteBuffer metadata; // required + private ByteBuffer includeBitset; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + METADATA((short)1, "metadata"), + INCLUDE_BITSET((short)2, "includeBitset"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // METADATA + return METADATA; + case 2: // INCLUDE_BITSET + return INCLUDE_BITSET; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
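MetadataPpdResult carries two required binary fields. The byte[] setters defined further below wrap a defensive copy in a ByteBuffer, so the struct never aliases caller-owned arrays. A small sketch, assuming a standalone harness with illustrative payloads; only the struct and its accessors come from this patch.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;

public class MetadataPpdResultCopySemantics {
  public static void main(String[] args) {
    byte[] ppdMetadata = "serialized-metadata".getBytes(StandardCharsets.UTF_8);
    byte[] includeBits = new byte[] { 0x0F };

    MetadataPpdResult result = new MetadataPpdResult();
    result.setMetadata(ppdMetadata);       // copies the array into a ByteBuffer
    result.setIncludeBitset(includeBits);  // likewise for the include bitset

    // Mutating the caller's array afterwards does not affect the struct's copy.
    ppdMetadata[0] = 0;
    System.out.println(result.getMetadata()[0] == 's'); // true
  }
}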
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.INCLUDE_BITSET, new org.apache.thrift.meta_data.FieldMetaData("includeBitset", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MetadataPpdResult.class, metaDataMap); + } + + public MetadataPpdResult() { + } + + public MetadataPpdResult( + ByteBuffer metadata, + ByteBuffer includeBitset) + { + this(); + this.metadata = org.apache.thrift.TBaseHelper.copyBinary(metadata); + this.includeBitset = org.apache.thrift.TBaseHelper.copyBinary(includeBitset); + } + + /** + * Performs a deep copy on other. + */ + public MetadataPpdResult(MetadataPpdResult other) { + if (other.isSetMetadata()) { + this.metadata = org.apache.thrift.TBaseHelper.copyBinary(other.metadata); + } + if (other.isSetIncludeBitset()) { + this.includeBitset = org.apache.thrift.TBaseHelper.copyBinary(other.includeBitset); + } + } + + public MetadataPpdResult deepCopy() { + return new MetadataPpdResult(this); + } + + @Override + public void clear() { + this.metadata = null; + this.includeBitset = null; + } + + public byte[] getMetadata() { + setMetadata(org.apache.thrift.TBaseHelper.rightSize(metadata)); + return metadata == null ? null : metadata.array(); + } + + public ByteBuffer bufferForMetadata() { + return org.apache.thrift.TBaseHelper.copyBinary(metadata); + } + + public void setMetadata(byte[] metadata) { + this.metadata = metadata == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(metadata, metadata.length)); + } + + public void setMetadata(ByteBuffer metadata) { + this.metadata = org.apache.thrift.TBaseHelper.copyBinary(metadata); + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public byte[] getIncludeBitset() { + setIncludeBitset(org.apache.thrift.TBaseHelper.rightSize(includeBitset)); + return includeBitset == null ? null : includeBitset.array(); + } + + public ByteBuffer bufferForIncludeBitset() { + return org.apache.thrift.TBaseHelper.copyBinary(includeBitset); + } + + public void setIncludeBitset(byte[] includeBitset) { + this.includeBitset = includeBitset == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(includeBitset, includeBitset.length)); + } + + public void setIncludeBitset(ByteBuffer includeBitset) { + this.includeBitset = org.apache.thrift.TBaseHelper.copyBinary(includeBitset); + } + + public void unsetIncludeBitset() { + this.includeBitset = null; + } + + /** Returns true if field includeBitset is set (has been assigned a value) and false otherwise */ + public boolean isSetIncludeBitset() { + return this.includeBitset != null; + } + + public void setIncludeBitsetIsSet(boolean value) { + if (!value) { + this.includeBitset = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((ByteBuffer)value); + } + break; + + case INCLUDE_BITSET: + if (value == null) { + unsetIncludeBitset(); + } else { + setIncludeBitset((ByteBuffer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case METADATA: + return getMetadata(); + + case INCLUDE_BITSET: + return getIncludeBitset(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case METADATA: + return isSetMetadata(); + case INCLUDE_BITSET: + return isSetIncludeBitset(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof MetadataPpdResult) + return this.equals((MetadataPpdResult)that); + return false; + } + + public boolean equals(MetadataPpdResult that) { + if (that == null) + return false; + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + boolean this_present_includeBitset = true && this.isSetIncludeBitset(); + boolean that_present_includeBitset = true && that.isSetIncludeBitset(); + if (this_present_includeBitset || that_present_includeBitset) { + if (!(this_present_includeBitset && that_present_includeBitset)) + return false; + if (!this.includeBitset.equals(that.includeBitset)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_metadata = true && (isSetMetadata()); + list.add(present_metadata); + if (present_metadata) + list.add(metadata); + + boolean present_includeBitset = true && (isSetIncludeBitset()); + list.add(present_includeBitset); + if (present_includeBitset) + list.add(includeBitset); + + return list.hashCode(); + } + + @Override + public int compareTo(MetadataPpdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMetadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetIncludeBitset()).compareTo(other.isSetIncludeBitset()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIncludeBitset()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.includeBitset, other.includeBitset); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("MetadataPpdResult("); + boolean first = true; + + sb.append("metadata:"); + if (this.metadata == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.metadata, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("includeBitset:"); + if (this.includeBitset == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.includeBitset, sb); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetMetadata()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString()); + } + + if (!isSetIncludeBitset()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'includeBitset' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class MetadataPpdResultStandardSchemeFactory implements SchemeFactory { + public MetadataPpdResultStandardScheme getScheme() { + return new MetadataPpdResultStandardScheme(); + } + } + + private static class MetadataPpdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, MetadataPpdResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.metadata = iprot.readBinary(); + struct.setMetadataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // INCLUDE_BITSET + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.includeBitset = iprot.readBinary(); + struct.setIncludeBitsetIsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, MetadataPpdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.metadata != null) { + oprot.writeFieldBegin(METADATA_FIELD_DESC); + oprot.writeBinary(struct.metadata); + oprot.writeFieldEnd(); + } + if (struct.includeBitset != null) { + oprot.writeFieldBegin(INCLUDE_BITSET_FIELD_DESC); + oprot.writeBinary(struct.includeBitset); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class MetadataPpdResultTupleSchemeFactory implements SchemeFactory { + public MetadataPpdResultTupleScheme getScheme() { + return new MetadataPpdResultTupleScheme(); + } + } + + private static class MetadataPpdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, MetadataPpdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBinary(struct.metadata); + oprot.writeBinary(struct.includeBitset); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, MetadataPpdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.metadata = iprot.readBinary(); + struct.setMetadataIsSet(true); + struct.includeBitset = iprot.readBinary(); + struct.setIncludeBitsetIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java index e214b36..b6f4fd4 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NoSuchLockException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchLockException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java index 1292a64..f4ebee8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NoSuchObjectException extends TException implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchObjectException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java index d1c430d..687e750 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NoSuchTxnException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchTxnException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java index bcf4f51..b7b1a87 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NotificationEvent implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEvent"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java index c2bc4e8..2c02b6b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NotificationEventRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index 24f9ce4..ff79fc9 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class NotificationEventResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java index c5f9ccf..ff8d200 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class OpenTxnRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index c233422..bf1f310 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class OpenTxnsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnsResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java index da7bd55..7f57e7d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Order implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Order"); diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java index 224d28e..1e473d6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Partition implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Partition"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java index c50a100..d765cd6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionListComposingSpec implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionListComposingSpec"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index b5251af..99eaa4a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionSpec implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpec"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java index 5574e0b..131967b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = 
"2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpecWithSharedSD"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java index e1ec73e..ca6dff2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionWithoutSD implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionWithoutSD"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 6149c31..08b1439 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionsByExprRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index 740f7bd..de09261 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionsByExprResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprResult"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 5d1ee87..8359883 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionsStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index da33014..a020261 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PartitionsStatsResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java index 8f29f50..7fa2bee 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PrincipalPrivilegeSet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrincipalPrivilegeSet"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java index 2fd819c..37149f1 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift 
Compiler (0.9.2)", date = "2015-8-17") public class PrivilegeBag implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeBag"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java index c04e196..22471c7 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class PrivilegeGrantInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeGrantInfo"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java new file mode 100644 index 0000000..874ea82 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -0,0 +1,588 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class PutFileMetadataRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PutFileMetadataRequest"); + + private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.LIST, (short)2); + + private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new PutFileMetadataRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new PutFileMetadataRequestTupleSchemeFactory()); + } + + private List fileIds; // required + private List metadata; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FILE_IDS((short)1, "fileIds"), + METADATA((short)2, "metadata"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FILE_IDS + return FILE_IDS; + case 2: // METADATA + return METADATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutFileMetadataRequest.class, metaDataMap); + } + + public PutFileMetadataRequest() { + } + + public PutFileMetadataRequest( + List fileIds, + List metadata) + { + this(); + this.fileIds = fileIds; + this.metadata = metadata; + } + + /** + * Performs a deep copy on other. 
+ */ + public PutFileMetadataRequest(PutFileMetadataRequest other) { + if (other.isSetFileIds()) { + List __this__fileIds = new ArrayList(other.fileIds); + this.fileIds = __this__fileIds; + } + if (other.isSetMetadata()) { + List __this__metadata = new ArrayList(other.metadata); + this.metadata = __this__metadata; + } + } + + public PutFileMetadataRequest deepCopy() { + return new PutFileMetadataRequest(this); + } + + @Override + public void clear() { + this.fileIds = null; + this.metadata = null; + } + + public int getFileIdsSize() { + return (this.fileIds == null) ? 0 : this.fileIds.size(); + } + + public java.util.Iterator getFileIdsIterator() { + return (this.fileIds == null) ? null : this.fileIds.iterator(); + } + + public void addToFileIds(long elem) { + if (this.fileIds == null) { + this.fileIds = new ArrayList(); + } + this.fileIds.add(elem); + } + + public List getFileIds() { + return this.fileIds; + } + + public void setFileIds(List fileIds) { + this.fileIds = fileIds; + } + + public void unsetFileIds() { + this.fileIds = null; + } + + /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */ + public boolean isSetFileIds() { + return this.fileIds != null; + } + + public void setFileIdsIsSet(boolean value) { + if (!value) { + this.fileIds = null; + } + } + + public int getMetadataSize() { + return (this.metadata == null) ? 0 : this.metadata.size(); + } + + public java.util.Iterator getMetadataIterator() { + return (this.metadata == null) ? null : this.metadata.iterator(); + } + + public void addToMetadata(ByteBuffer elem) { + if (this.metadata == null) { + this.metadata = new ArrayList(); + } + this.metadata.add(elem); + } + + public List getMetadata() { + return this.metadata; + } + + public void setMetadata(List metadata) { + this.metadata = metadata; + } + + public void unsetMetadata() { + this.metadata = null; + } + + /** Returns true if field metadata is set (has been assigned a value) and false otherwise */ + public boolean isSetMetadata() { + return this.metadata != null; + } + + public void setMetadataIsSet(boolean value) { + if (!value) { + this.metadata = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FILE_IDS: + if (value == null) { + unsetFileIds(); + } else { + setFileIds((List)value); + } + break; + + case METADATA: + if (value == null) { + unsetMetadata(); + } else { + setMetadata((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FILE_IDS: + return getFileIds(); + + case METADATA: + return getMetadata(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FILE_IDS: + return isSetFileIds(); + case METADATA: + return isSetMetadata(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof PutFileMetadataRequest) + return this.equals((PutFileMetadataRequest)that); + return false; + } + + public boolean equals(PutFileMetadataRequest that) { + if (that == null) + return false; + + boolean this_present_fileIds = true && this.isSetFileIds(); + boolean that_present_fileIds = true && that.isSetFileIds(); + if (this_present_fileIds || that_present_fileIds) { + if (!(this_present_fileIds && 
that_present_fileIds)) + return false; + if (!this.fileIds.equals(that.fileIds)) + return false; + } + + boolean this_present_metadata = true && this.isSetMetadata(); + boolean that_present_metadata = true && that.isSetMetadata(); + if (this_present_metadata || that_present_metadata) { + if (!(this_present_metadata && that_present_metadata)) + return false; + if (!this.metadata.equals(that.metadata)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fileIds = true && (isSetFileIds()); + list.add(present_fileIds); + if (present_fileIds) + list.add(fileIds); + + boolean present_metadata = true && (isSetMetadata()); + list.add(present_metadata); + if (present_metadata) + list.add(metadata); + + return list.hashCode(); + } + + @Override + public int compareTo(PutFileMetadataRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFileIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMetadata()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("PutFileMetadataRequest("); + boolean first = true; + + sb.append("fileIds:"); + if (this.fileIds == null) { + sb.append("null"); + } else { + sb.append(this.fileIds); + } + first = false; + if (!first) sb.append(", "); + sb.append("metadata:"); + if (this.metadata == null) { + sb.append("null"); + } else { + sb.append(this.metadata); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetFileIds()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString()); + } + + if (!isSetMetadata()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class PutFileMetadataRequestStandardSchemeFactory implements SchemeFactory { + public PutFileMetadataRequestStandardScheme getScheme() { + return new PutFileMetadataRequestStandardScheme(); + } + } + + private static class PutFileMetadataRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FILE_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list568 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list568.size); + long _elem569; + for (int _i570 = 0; _i570 < _list568.size; ++_i570) + { + _elem569 = iprot.readI64(); + struct.fileIds.add(_elem569); + } + iprot.readListEnd(); + } + struct.setFileIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // METADATA + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list571 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list571.size); + ByteBuffer _elem572; + for (int _i573 = 0; _i573 < _list571.size; ++_i573) + { + _elem572 = iprot.readBinary(); + struct.metadata.add(_elem572); + } + iprot.readListEnd(); + } + struct.setMetadataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fileIds != null) { + oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); + for (long _iter574 : struct.fileIds) + { + oprot.writeI64(_iter574); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.metadata != null) { + oprot.writeFieldBegin(METADATA_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); + for (ByteBuffer _iter575 : struct.metadata) + { + oprot.writeBinary(_iter575); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
PutFileMetadataRequestTupleSchemeFactory implements SchemeFactory { + public PutFileMetadataRequestTupleScheme getScheme() { + return new PutFileMetadataRequestTupleScheme(); + } + } + + private static class PutFileMetadataRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.fileIds.size()); + for (long _iter576 : struct.fileIds) + { + oprot.writeI64(_iter576); + } + } + { + oprot.writeI32(struct.metadata.size()); + for (ByteBuffer _iter577 : struct.metadata) + { + oprot.writeBinary(_iter577); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list578 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list578.size); + long _elem579; + for (int _i580 = 0; _i580 < _list578.size; ++_i580) + { + _elem579 = iprot.readI64(); + struct.fileIds.add(_elem579); + } + } + struct.setFileIdsIsSet(true); + { + org.apache.thrift.protocol.TList _list581 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list581.size); + ByteBuffer _elem582; + for (int _i583 = 0; _i583 < _list581.size; ++_i583) + { + _elem582 = iprot.readBinary(); + struct.metadata.add(_elem582); + } + } + struct.setMetadataIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java new file mode 100644 index 0000000..e478cf3 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.2) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") +public class PutFileMetadataResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("PutFileMetadataResult"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new PutFileMetadataResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new PutFileMetadataResultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutFileMetadataResult.class, metaDataMap); + } + + public PutFileMetadataResult() { + } + + /** + * Performs a deep copy on other. 
+ */ + public PutFileMetadataResult(PutFileMetadataResult other) { + } + + public PutFileMetadataResult deepCopy() { + return new PutFileMetadataResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof PutFileMetadataResult) + return this.equals((PutFileMetadataResult)that); + return false; + } + + public boolean equals(PutFileMetadataResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(PutFileMetadataResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("PutFileMetadataResult("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class PutFileMetadataResultStandardSchemeFactory implements SchemeFactory { + public PutFileMetadataResultStandardScheme getScheme() { + return new PutFileMetadataResultStandardScheme(); + } + } + + private static class PutFileMetadataResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + 
struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class PutFileMetadataResultTupleSchemeFactory implements SchemeFactory { + public PutFileMetadataResultTupleScheme getScheme() { + return new PutFileMetadataResultTupleScheme(); + } + } + + private static class PutFileMetadataResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java index 3b3df25..a94ce18 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ResourceUri implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ResourceUri"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java index 5c882d2..8f38145 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Role implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Role"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java index c4beb08..e763bdd 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class 
RolePrincipalGrant implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RolePrincipalGrant"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java index 8772180..ede0cb4 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Schema implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Schema"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java index b30e698..15af1db 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class SerDeInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SerDeInfo"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 7da298c..ac9420f 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class SetPartitionsStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetPartitionsStatsRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java index 7756384..0b4e754 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java @@ -34,7 +34,7 
@@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowCompactRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index dd1e857..0c98dc4 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowCompactResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java index cd7e79e..1a5926e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowCompactResponseElement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponseElement"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java index 122c070..adf9350 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowLocksRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksRequest"); diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 52b0bbc..6e577d5 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowLocksResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponse"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java index 8be9b05..80367ac 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ShowLocksResponseElement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponseElement"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java index bc64495..b4fa97a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class SkewedInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SkewedInfo"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java index 165a879..3759f9d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") 
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class StorageDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StorageDescriptor"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java index 9906ff3..0bab26b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class StringColumnStatsData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StringColumnStatsData"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 51b9e38..f11e6aa 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Table implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable
{ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Table"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index 1edcaf9..c1092e2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TableStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index 25a1f25..d0577cf 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TableStatsResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index cdbae95..9d72cd0 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ThriftHiveMetastore { /** @@ -290,6 +290,16 @@ public FireEventResponse fire_listener_event(FireEventRequest rqst) throws org.apache.thrift.TException; + public void flushCache() throws org.apache.thrift.TException; + + public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req) throws org.apache.thrift.TException; + + public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws org.apache.thrift.TException; + + public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws org.apache.thrift.TException; + + public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) throws org.apache.thrift.TException; + } public interface AsyncIface 
extends com.facebook.fb303.FacebookService .AsyncIface { @@ -542,6 +552,16 @@ public void fire_listener_event(FireEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void flushCache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_file_metadata_by_expr(GetFileMetadataByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_file_metadata(GetFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void put_file_metadata(PutFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void clear_file_metadata(ClearFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -4219,6 +4239,117 @@ public FireEventResponse recv_fire_listener_event() throws org.apache.thrift.TEx throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "fire_listener_event failed: unknown result"); } + public void flushCache() throws org.apache.thrift.TException + { + send_flushCache(); + recv_flushCache(); + } + + public void send_flushCache() throws org.apache.thrift.TException + { + flushCache_args args = new flushCache_args(); + sendBase("flushCache", args); + } + + public void recv_flushCache() throws org.apache.thrift.TException + { + flushCache_result result = new flushCache_result(); + receiveBase(result, "flushCache"); + return; + } + + public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req) throws org.apache.thrift.TException + { + send_get_file_metadata_by_expr(req); + return recv_get_file_metadata_by_expr(); + } + + public void send_get_file_metadata_by_expr(GetFileMetadataByExprRequest req) throws org.apache.thrift.TException + { + get_file_metadata_by_expr_args args = new get_file_metadata_by_expr_args(); + args.setReq(req); + sendBase("get_file_metadata_by_expr", args); + } + + public GetFileMetadataByExprResult recv_get_file_metadata_by_expr() throws org.apache.thrift.TException + { + get_file_metadata_by_expr_result result = new get_file_metadata_by_expr_result(); + receiveBase(result, "get_file_metadata_by_expr"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + } + + public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws org.apache.thrift.TException + { + send_get_file_metadata(req); + return recv_get_file_metadata(); + } + + public void send_get_file_metadata(GetFileMetadataRequest req) throws org.apache.thrift.TException + { + get_file_metadata_args args = new get_file_metadata_args(); + args.setReq(req); + sendBase("get_file_metadata", args); + } + + public GetFileMetadataResult recv_get_file_metadata() throws org.apache.thrift.TException + { + get_file_metadata_result result = new get_file_metadata_result(); + receiveBase(result, "get_file_metadata"); + if (result.isSetSuccess()) { + return result.success; + } + throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_file_metadata failed: unknown result"); + } + + public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws org.apache.thrift.TException + { + send_put_file_metadata(req); + return recv_put_file_metadata(); + } + + public void send_put_file_metadata(PutFileMetadataRequest req) throws org.apache.thrift.TException + { + put_file_metadata_args args = new put_file_metadata_args(); + args.setReq(req); + sendBase("put_file_metadata", args); + } + + public PutFileMetadataResult recv_put_file_metadata() throws org.apache.thrift.TException + { + put_file_metadata_result result = new put_file_metadata_result(); + receiveBase(result, "put_file_metadata"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "put_file_metadata failed: unknown result"); + } + + public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) throws org.apache.thrift.TException + { + send_clear_file_metadata(req); + return recv_clear_file_metadata(); + } + + public void send_clear_file_metadata(ClearFileMetadataRequest req) throws org.apache.thrift.TException + { + clear_file_metadata_args args = new clear_file_metadata_args(); + args.setReq(req); + sendBase("clear_file_metadata", args); + } + + public ClearFileMetadataResult recv_clear_file_metadata() throws org.apache.thrift.TException + { + clear_file_metadata_result result = new clear_file_metadata_result(); + receiveBase(result, "clear_file_metadata"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "clear_file_metadata failed: unknown result"); + } + } public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -8652,6 +8783,163 @@ public FireEventResponse getResult() throws org.apache.thrift.TException { } } + public void flushCache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + flushCache_call method_call = new flushCache_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class flushCache_call extends org.apache.thrift.async.TAsyncMethodCall { + public flushCache_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("flushCache", org.apache.thrift.protocol.TMessageType.CALL, 0)); + flushCache_args args = new flushCache_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport 
memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_flushCache(); + } + } + + public void get_file_metadata_by_expr(GetFileMetadataByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_file_metadata_by_expr_call method_call = new get_file_metadata_by_expr_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_file_metadata_by_expr_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetFileMetadataByExprRequest req; + public get_file_metadata_by_expr_call(GetFileMetadataByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_file_metadata_by_expr", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_file_metadata_by_expr_args args = new get_file_metadata_by_expr_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetFileMetadataByExprResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_file_metadata_by_expr(); + } + } + + public void get_file_metadata(GetFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_file_metadata_call method_call = new get_file_metadata_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_file_metadata_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetFileMetadataRequest req; + public get_file_metadata_call(GetFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_file_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_file_metadata_args args = new get_file_metadata_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetFileMetadataResult getResult() 
throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_file_metadata(); + } + } + + public void put_file_metadata(PutFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + put_file_metadata_call method_call = new put_file_metadata_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class put_file_metadata_call extends org.apache.thrift.async.TAsyncMethodCall { + private PutFileMetadataRequest req; + public put_file_metadata_call(PutFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("put_file_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); + put_file_metadata_args args = new put_file_metadata_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public PutFileMetadataResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_put_file_metadata(); + } + } + + public void clear_file_metadata(ClearFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + clear_file_metadata_call method_call = new clear_file_metadata_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class clear_file_metadata_call extends org.apache.thrift.async.TAsyncMethodCall { + private ClearFileMetadataRequest req; + public clear_file_metadata_call(ClearFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("clear_file_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); + 
clear_file_metadata_args args = new clear_file_metadata_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public ClearFileMetadataResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_clear_file_metadata(); + } + } + } public static class Processor extends com.facebook.fb303.FacebookService.Processor implements org.apache.thrift.TProcessor { @@ -8789,6 +9077,11 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public flushCache() { + super("flushCache"); + } + + public flushCache_args getEmptyArgsInstance() { + return new flushCache_args(); + } + + protected boolean isOneway() { + return false; + } + + public flushCache_result getResult(I iface, flushCache_args args) throws org.apache.thrift.TException { + flushCache_result result = new flushCache_result(); + iface.flushCache(); + return result; + } + } + + public static class get_file_metadata_by_expr extends org.apache.thrift.ProcessFunction { + public get_file_metadata_by_expr() { + super("get_file_metadata_by_expr"); + } + + public get_file_metadata_by_expr_args getEmptyArgsInstance() { + return new get_file_metadata_by_expr_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_file_metadata_by_expr_result getResult(I iface, get_file_metadata_by_expr_args args) throws org.apache.thrift.TException { + get_file_metadata_by_expr_result result = new get_file_metadata_by_expr_result(); + result.success = iface.get_file_metadata_by_expr(args.req); + return result; + } + } + + public static class get_file_metadata extends org.apache.thrift.ProcessFunction { + public get_file_metadata() { + super("get_file_metadata"); + } + + public get_file_metadata_args getEmptyArgsInstance() { + return new get_file_metadata_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_file_metadata_result getResult(I iface, get_file_metadata_args args) throws org.apache.thrift.TException { + get_file_metadata_result result = new get_file_metadata_result(); + result.success = iface.get_file_metadata(args.req); + return result; + } + } + + public static class put_file_metadata extends org.apache.thrift.ProcessFunction { + public put_file_metadata() { + super("put_file_metadata"); + } + + public put_file_metadata_args getEmptyArgsInstance() { + return new put_file_metadata_args(); + } + + protected boolean isOneway() { + return false; + } + + public put_file_metadata_result getResult(I iface, put_file_metadata_args args) throws org.apache.thrift.TException { + put_file_metadata_result result = new put_file_metadata_result(); + result.success = iface.put_file_metadata(args.req); + return result; + } + } + + public static class clear_file_metadata extends org.apache.thrift.ProcessFunction { + public clear_file_metadata() { + super("clear_file_metadata"); + } + + public clear_file_metadata_args getEmptyArgsInstance() { + return new clear_file_metadata_args(); + } + + protected boolean isOneway() { + return false; + } + + public clear_file_metadata_result getResult(I iface, clear_file_metadata_args args) throws 
org.apache.thrift.TException { + clear_file_metadata_result result = new clear_file_metadata_result(); + result.success = iface.clear_file_metadata(args.req); + return result; + } + } + } public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -12150,6 +12543,11 @@ protected AsyncProcessor(I iface, Map, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - - private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); - } - - private String key; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY((short)1, "key"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } + public static class flushCache extends org.apache.thrift.AsyncProcessFunction { + public flushCache() { + super("flushCache"); } - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // KEY - return KEY; - default: - return null; - } + public flushCache_args getEmptyArgsInstance() { + return new flushCache_args(); } - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + flushCache_result result = new flushCache_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + flushCache_result result = new flushCache_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); + protected boolean isOneway() { + return false; } - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; + public void start(I iface, flushCache_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.flushCache(resultHandler); } + } - public short getThriftFieldId() { - return _thriftId; + public static class get_file_metadata_by_expr extends org.apache.thrift.AsyncProcessFunction { + public get_file_metadata_by_expr() { + super("get_file_metadata_by_expr"); } - public String getFieldName() { - return _fieldName; + public get_file_metadata_by_expr_args getEmptyArgsInstance() { + return new get_file_metadata_by_expr_args(); } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); - } - public getMetaConf_args() { - } - - public getMetaConf_args( - String key) - { - this(); - this.key = key; - } - - /** - * Performs a deep copy on other. - */ - public getMetaConf_args(getMetaConf_args other) { - if (other.isSetKey()) { - this.key = other.key; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetFileMetadataByExprResult o) { + get_file_metadata_by_expr_result result = new get_file_metadata_by_expr_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_file_metadata_by_expr_result result = new get_file_metadata_by_expr_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - } - - public getMetaConf_args deepCopy() { - return new getMetaConf_args(this); - } - - @Override - public void clear() { - this.key = null; - } - - public String getKey() { - return this.key; - } - public void setKey(String key) { - this.key = key; - } - - public void unsetKey() { - this.key = null; - } - - /** Returns true if field key is set (has been assigned a value) and false otherwise */ - public boolean isSetKey() { - return this.key != null; - } - - public void setKeyIsSet(boolean value) { - if (!value) { - this.key = null; + protected boolean 
isOneway() { + return false; } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case KEY: - if (value == null) { - unsetKey(); - } else { - setKey((String)value); - } - break; + public void start(I iface, get_file_metadata_by_expr_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_file_metadata_by_expr(args.req,resultHandler); } } - public Object getFieldValue(_Fields field) { - switch (field) { - case KEY: - return getKey(); - + public static class get_file_metadata extends org.apache.thrift.AsyncProcessFunction { + public get_file_metadata() { + super("get_file_metadata"); } - throw new IllegalStateException(); - } - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); + public get_file_metadata_args getEmptyArgsInstance() { + return new get_file_metadata_args(); } - switch (field) { - case KEY: - return isSetKey(); + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetFileMetadataResult o) { + get_file_metadata_result result = new get_file_metadata_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_file_metadata_result result = new get_file_metadata_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getMetaConf_args) - return this.equals((getMetaConf_args)that); - return false; - } - public boolean equals(getMetaConf_args that) { - if (that == null) + protected boolean isOneway() { return false; - - boolean this_present_key = true && this.isSetKey(); - boolean that_present_key = true && that.isSetKey(); - if (this_present_key || that_present_key) { - if (!(this_present_key && that_present_key)) - return false; - if (!this.key.equals(that.key)) - return false; } - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_key = true && (isSetKey()); - list.add(present_key); - if (present_key) - list.add(key); - - return list.hashCode(); + public void start(I iface, get_file_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_file_metadata(args.req,resultHandler); + } } - @Override - public int compareTo(getMetaConf_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); + public static class put_file_metadata extends org.apache.thrift.AsyncProcessFunction { + public 
put_file_metadata() { + super("put_file_metadata"); } - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); - if (lastComparison != 0) { - return lastComparison; + public put_file_metadata_args getEmptyArgsInstance() { + return new put_file_metadata_args(); } - if (isSetKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getMetaConf_args("); - boolean first = true; - - sb.append("key:"); - if (this.key == null) { - sb.append("null"); - } else { - sb.append(this.key); + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(PutFileMetadataResult o) { + put_file_metadata_result result = new put_file_metadata_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + put_file_metadata_result result = new put_file_metadata_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + protected boolean isOneway() { + return false; } - } - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + public void start(I iface, put_file_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.put_file_metadata(args.req,resultHandler); } } - private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { - public getMetaConf_argsStandardScheme getScheme() { - return new getMetaConf_argsStandardScheme(); + 
public static class clear_file_metadata extends org.apache.thrift.AsyncProcessFunction { + public clear_file_metadata() { + super("clear_file_metadata"); } - } - private static class getMetaConf_argsStandardScheme extends StandardScheme { + public clear_file_metadata_args getEmptyArgsInstance() { + return new clear_file_metadata_args(); + } - public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(ClearFileMetadataResult o) { + clear_file_metadata_result result = new clear_file_metadata_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); } - switch (schemeField.id) { - case 1: // KEY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + clear_file_metadata_result result = new clear_file_metadata_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.key != null) { - oprot.writeFieldBegin(KEY_FIELD_DESC); - oprot.writeString(struct.key); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { - public getMetaConf_argsTupleScheme getScheme() { - return new getMetaConf_argsTupleScheme(); + }; } - } - - private static class getMetaConf_argsTupleScheme extends TupleScheme { - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetKey()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetKey()) { - oprot.writeString(struct.key); - } + protected boolean isOneway() { + return false; } - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = 
iprot.readBitSet(1); - if (incoming.get(0)) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } + public void start(I iface, clear_file_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.clear_file_metadata(args.req,resultHandler); } } } - public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); } - private String success; // required - private MetaException o1; // required + private String key; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - O1((short)1, "o1"); + KEY((short)1, "key"); private static final Map byName = new HashMap(); @@ -20205,10 +20496,371 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args str */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - case 1: // O1 - return O1; + case 1: // KEY + return KEY; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); + } + + public getMetaConf_args() { + } + + public getMetaConf_args( + String key) + { + this(); + this.key = key; + } + + /** + * Performs a deep copy on other. + */ + public getMetaConf_args(getMetaConf_args other) { + if (other.isSetKey()) { + this.key = other.key; + } + } + + public getMetaConf_args deepCopy() { + return new getMetaConf_args(this); + } + + @Override + public void clear() { + this.key = null; + } + + public String getKey() { + return this.key; + } + + public void setKey(String key) { + this.key = key; + } + + public void unsetKey() { + this.key = null; + } + + /** Returns true if field key is set (has been assigned a value) and false otherwise */ + public boolean isSetKey() { + return this.key != null; + } + + public void setKeyIsSet(boolean value) { + if (!value) { + this.key = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case KEY: + if (value == null) { + unsetKey(); + } else { + setKey((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case KEY: + return getKey(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case KEY: + return isSetKey(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getMetaConf_args) + return this.equals((getMetaConf_args)that); + return false; + } + + public boolean equals(getMetaConf_args that) { + if (that == null) + return false; + + boolean this_present_key = true && this.isSetKey(); + boolean that_present_key = true && that.isSetKey(); + if (this_present_key || that_present_key) { + if (!(this_present_key && that_present_key)) + return false; + if (!this.key.equals(that.key)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_key = true && (isSetKey()); + list.add(present_key); + if (present_key) + list.add(key); + + return list.hashCode(); + } + + @Override + public int compareTo(getMetaConf_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = 
Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getMetaConf_args("); + boolean first = true; + + sb.append("key:"); + if (this.key == null) { + sb.append("null"); + } else { + sb.append(this.key); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { + public getMetaConf_argsStandardScheme getScheme() { + return new getMetaConf_argsStandardScheme(); + } + } + + private static class getMetaConf_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.key != null) { + oprot.writeFieldBegin(KEY_FIELD_DESC); + oprot.writeString(struct.key); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { + public getMetaConf_argsTupleScheme getScheme() { + return new getMetaConf_argsTupleScheme(); + } + } + + private static class getMetaConf_argsTupleScheme extends TupleScheme { + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetKey()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetKey()) { + oprot.writeString(struct.key); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } + } + } + + } + + public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + } + + private String success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -25207,13 +25859,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); - struct.success = new ArrayList(_list532.size); - String _elem533; - for (int _i534 = 0; _i534 < _list532.size; ++_i534) + org.apache.thrift.protocol.TList _list600 = iprot.readListBegin(); + struct.success = new ArrayList(_list600.size); + String _elem601; + for (int _i602 = 0; _i602 < _list600.size; ++_i602) { - _elem533 = iprot.readString(); - struct.success.add(_elem533); + _elem601 = iprot.readString(); + struct.success.add(_elem601); } iprot.readListEnd(); } @@ -25248,9 +25900,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter535 : struct.success) + for (String _iter603 : struct.success) { - oprot.writeString(_iter535); + oprot.writeString(_iter603); } oprot.writeListEnd(); } @@ -25289,9 +25941,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter536 : struct.success) + for (String _iter604 : struct.success) { - oprot.writeString(_iter536); + oprot.writeString(_iter604); } } } @@ -25306,13 +25958,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list537.size); - String _elem538; - for (int _i539 = 0; _i539 < _list537.size; ++_i539) + org.apache.thrift.protocol.TList _list605 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list605.size); + String _elem606; + for (int _i607 = 0; _i607 < _list605.size; ++_i607) { - _elem538 = iprot.readString(); - struct.success.add(_elem538); + _elem606 = iprot.readString(); + struct.success.add(_elem606); } } struct.setSuccessIsSet(true); @@ -25966,13 +26618,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); - struct.success = new ArrayList(_list540.size); - String _elem541; - for (int _i542 = 0; _i542 < _list540.size; ++_i542) + org.apache.thrift.protocol.TList _list608 = iprot.readListBegin(); + struct.success = new ArrayList(_list608.size); + String _elem609; + for (int _i610 = 0; _i610 < _list608.size; ++_i610) { - _elem541 = iprot.readString(); - struct.success.add(_elem541); + _elem609 = iprot.readString(); + struct.success.add(_elem609); } iprot.readListEnd(); } @@ -26007,9 +26659,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for 
(String _iter543 : struct.success) + for (String _iter611 : struct.success) { - oprot.writeString(_iter543); + oprot.writeString(_iter611); } oprot.writeListEnd(); } @@ -26048,9 +26700,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter544 : struct.success) + for (String _iter612 : struct.success) { - oprot.writeString(_iter544); + oprot.writeString(_iter612); } } } @@ -26065,13 +26717,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list545.size); - String _elem546; - for (int _i547 = 0; _i547 < _list545.size; ++_i547) + org.apache.thrift.protocol.TList _list613 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list613.size); + String _elem614; + for (int _i615 = 0; _i615 < _list613.size; ++_i615) { - _elem546 = iprot.readString(); - struct.success.add(_elem546); + _elem614 = iprot.readString(); + struct.success.add(_elem614); } } struct.setSuccessIsSet(true); @@ -30678,16 +31330,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map548 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map548.size); - String _key549; - Type _val550; - for (int _i551 = 0; _i551 < _map548.size; ++_i551) + org.apache.thrift.protocol.TMap _map616 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map616.size); + String _key617; + Type _val618; + for (int _i619 = 0; _i619 < _map616.size; ++_i619) { - _key549 = iprot.readString(); - _val550 = new Type(); - _val550.read(iprot); - struct.success.put(_key549, _val550); + _key617 = iprot.readString(); + _val618 = new Type(); + _val618.read(iprot); + struct.success.put(_key617, _val618); } iprot.readMapEnd(); } @@ -30722,10 +31374,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter552 : struct.success.entrySet()) + for (Map.Entry _iter620 : struct.success.entrySet()) { - oprot.writeString(_iter552.getKey()); - _iter552.getValue().write(oprot); + oprot.writeString(_iter620.getKey()); + _iter620.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -30764,10 +31416,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter553 : struct.success.entrySet()) + for (Map.Entry _iter621 : struct.success.entrySet()) { - oprot.writeString(_iter553.getKey()); - _iter553.getValue().write(oprot); + oprot.writeString(_iter621.getKey()); + _iter621.getValue().write(oprot); } } } @@ -30782,16 +31434,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map554 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map554.size); - String _key555; - Type _val556; - for (int _i557 = 0; _i557 < _map554.size; ++_i557) + org.apache.thrift.protocol.TMap _map622 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map622.size); + String _key623; + Type _val624; + for (int _i625 = 0; _i625 < _map622.size; ++_i625) { - _key555 = iprot.readString(); - _val556 = new Type(); - _val556.read(iprot); - struct.success.put(_key555, _val556); + _key623 = iprot.readString(); + _val624 = new Type(); + _val624.read(iprot); + struct.success.put(_key623, _val624); } } struct.setSuccessIsSet(true); @@ -31826,14 +32478,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list558 = iprot.readListBegin(); - struct.success = new ArrayList(_list558.size); - FieldSchema _elem559; - for (int _i560 = 0; _i560 < _list558.size; ++_i560) + org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); + struct.success = new ArrayList(_list626.size); + FieldSchema _elem627; + for (int _i628 = 0; _i628 < _list626.size; ++_i628) { - _elem559 = new FieldSchema(); - _elem559.read(iprot); - struct.success.add(_elem559); + _elem627 = new FieldSchema(); + _elem627.read(iprot); + struct.success.add(_elem627); } iprot.readListEnd(); } @@ -31886,9 +32538,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter561 : struct.success) + for (FieldSchema _iter629 : struct.success) { - _iter561.write(oprot); + _iter629.write(oprot); } oprot.writeListEnd(); } @@ -31943,9 +32595,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter562 : struct.success) + for (FieldSchema _iter630 : struct.success) { - _iter562.write(oprot); + _iter630.write(oprot); } } } @@ -31966,14 +32618,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list563 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list563.size); - FieldSchema _elem564; - for (int _i565 = 0; _i565 < _list563.size; ++_i565) + org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list631.size); + FieldSchema _elem632; + for (int _i633 = 0; _i633 < _list631.size; ++_i633) { - _elem564 = new FieldSchema(); - _elem564.read(iprot); - struct.success.add(_elem564); + _elem632 = new FieldSchema(); + _elem632.read(iprot); + struct.success.add(_elem632); } } struct.setSuccessIsSet(true); @@ -33127,14 +33779,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); - struct.success = new ArrayList(_list566.size); - FieldSchema _elem567; - for (int _i568 = 0; _i568 < _list566.size; ++_i568) + org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); + struct.success = new ArrayList(_list634.size); + FieldSchema _elem635; + for (int _i636 = 0; _i636 < _list634.size; ++_i636) { - _elem567 = new FieldSchema(); - _elem567.read(iprot); - struct.success.add(_elem567); + _elem635 = new FieldSchema(); + _elem635.read(iprot); + struct.success.add(_elem635); } iprot.readListEnd(); } @@ -33187,9 +33839,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter569 : struct.success) + for (FieldSchema _iter637 : struct.success) { - _iter569.write(oprot); + _iter637.write(oprot); } oprot.writeListEnd(); } @@ -33244,9 +33896,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter570 : struct.success) + for (FieldSchema _iter638 : struct.success) { - _iter570.write(oprot); + _iter638.write(oprot); } } } @@ -33267,14 +33919,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list571.size); - FieldSchema _elem572; - for (int _i573 = 0; _i573 < _list571.size; ++_i573) + org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list639.size); + FieldSchema _elem640; + for (int _i641 = 0; _i641 < _list639.size; ++_i641) { - _elem572 = new FieldSchema(); - _elem572.read(iprot); - struct.success.add(_elem572); + _elem640 = new FieldSchema(); + _elem640.read(iprot); + struct.success.add(_elem640); } } struct.setSuccessIsSet(true); @@ -34319,14 +34971,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list574 = iprot.readListBegin(); - struct.success = new ArrayList(_list574.size); - FieldSchema _elem575; - for (int _i576 = 0; _i576 < _list574.size; ++_i576) + org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); + struct.success = new ArrayList(_list642.size); + FieldSchema _elem643; + for (int _i644 = 0; _i644 < _list642.size; ++_i644) { - _elem575 = new FieldSchema(); - _elem575.read(iprot); - struct.success.add(_elem575); + _elem643 = new FieldSchema(); + _elem643.read(iprot); + struct.success.add(_elem643); } iprot.readListEnd(); } @@ -34379,9 +35031,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter577 : struct.success) + for (FieldSchema _iter645 : struct.success) { - _iter577.write(oprot); + _iter645.write(oprot); } oprot.writeListEnd(); } @@ -34436,9 +35088,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter578 : struct.success) + for (FieldSchema _iter646 : struct.success) { - _iter578.write(oprot); + _iter646.write(oprot); } } } @@ -34459,14 +35111,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list579 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list579.size); - FieldSchema _elem580; - for (int _i581 = 0; _i581 < _list579.size; ++_i581) + org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list647.size); + FieldSchema _elem648; + for (int _i649 = 0; _i649 < _list647.size; ++_i649) { - _elem580 = new FieldSchema(); - _elem580.read(iprot); - struct.success.add(_elem580); + _elem648 = new FieldSchema(); + _elem648.read(iprot); + struct.success.add(_elem648); } } struct.setSuccessIsSet(true); @@ -35620,14 +36272,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list582 = iprot.readListBegin(); - struct.success = new ArrayList(_list582.size); - FieldSchema _elem583; - for (int _i584 = 0; _i584 < _list582.size; ++_i584) + org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); + struct.success = new ArrayList(_list650.size); + FieldSchema _elem651; + for (int _i652 = 0; _i652 < _list650.size; ++_i652) { - _elem583 = new FieldSchema(); - _elem583.read(iprot); - struct.success.add(_elem583); + _elem651 = new FieldSchema(); + _elem651.read(iprot); + struct.success.add(_elem651); } iprot.readListEnd(); } @@ -35680,9 +36332,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter585 : struct.success) + for (FieldSchema _iter653 : struct.success) { - _iter585.write(oprot); + _iter653.write(oprot); } oprot.writeListEnd(); } @@ -35737,9 +36389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter586 : struct.success) + for (FieldSchema _iter654 : struct.success) { - _iter586.write(oprot); + _iter654.write(oprot); } } } @@ -35760,14 +36412,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list587 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list587.size); - FieldSchema _elem588; - for (int _i589 = 0; _i589 < _list587.size; ++_i589) + org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list655.size); + FieldSchema _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem588 = new FieldSchema(); - _elem588.read(iprot); - 
struct.success.add(_elem588); + _elem656 = new FieldSchema(); + _elem656.read(iprot); + struct.success.add(_elem656); } } struct.setSuccessIsSet(true); @@ -41007,13 +41659,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list590 = iprot.readListBegin(); - struct.success = new ArrayList(_list590.size); - String _elem591; - for (int _i592 = 0; _i592 < _list590.size; ++_i592) + org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); + struct.success = new ArrayList(_list658.size); + String _elem659; + for (int _i660 = 0; _i660 < _list658.size; ++_i660) { - _elem591 = iprot.readString(); - struct.success.add(_elem591); + _elem659 = iprot.readString(); + struct.success.add(_elem659); } iprot.readListEnd(); } @@ -41048,9 +41700,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter593 : struct.success) + for (String _iter661 : struct.success) { - oprot.writeString(_iter593); + oprot.writeString(_iter661); } oprot.writeListEnd(); } @@ -41089,9 +41741,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter594 : struct.success) + for (String _iter662 : struct.success) { - oprot.writeString(_iter594); + oprot.writeString(_iter662); } } } @@ -41106,13 +41758,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list595 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list595.size); - String _elem596; - for (int _i597 = 0; _i597 < _list595.size; ++_i597) + org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list663.size); + String _elem664; + for (int _i665 = 0; _i665 < _list663.size; ++_i665) { - _elem596 = iprot.readString(); - struct.success.add(_elem596); + _elem664 = iprot.readString(); + struct.success.add(_elem664); } } struct.setSuccessIsSet(true); @@ -41878,13 +42530,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list598 = iprot.readListBegin(); - struct.success = new ArrayList(_list598.size); - String _elem599; - for (int _i600 = 0; _i600 < _list598.size; ++_i600) + org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); + struct.success = new ArrayList(_list666.size); + String _elem667; + for (int _i668 = 0; _i668 < _list666.size; ++_i668) { - _elem599 = iprot.readString(); - struct.success.add(_elem599); + _elem667 = iprot.readString(); + struct.success.add(_elem667); } iprot.readListEnd(); } @@ -41919,9 +42571,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter601 : 
struct.success) + for (String _iter669 : struct.success) { - oprot.writeString(_iter601); + oprot.writeString(_iter669); } oprot.writeListEnd(); } @@ -41960,9 +42612,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter602 : struct.success) + for (String _iter670 : struct.success) { - oprot.writeString(_iter602); + oprot.writeString(_iter670); } } } @@ -41977,13 +42629,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list603 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list603.size); - String _elem604; - for (int _i605 = 0; _i605 < _list603.size; ++_i605) + org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list671.size); + String _elem672; + for (int _i673 = 0; _i673 < _list671.size; ++_i673) { - _elem604 = iprot.readString(); - struct.success.add(_elem604); + _elem672 = iprot.readString(); + struct.success.add(_elem672); } } struct.setSuccessIsSet(true); @@ -43436,13 +44088,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list606 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list606.size); - String _elem607; - for (int _i608 = 0; _i608 < _list606.size; ++_i608) + org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list674.size); + String _elem675; + for (int _i676 = 0; _i676 < _list674.size; ++_i676) { - _elem607 = iprot.readString(); - struct.tbl_names.add(_elem607); + _elem675 = iprot.readString(); + struct.tbl_names.add(_elem675); } iprot.readListEnd(); } @@ -43473,9 +44125,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter609 : struct.tbl_names) + for (String _iter677 : struct.tbl_names) { - oprot.writeString(_iter609); + oprot.writeString(_iter677); } oprot.writeListEnd(); } @@ -43512,9 +44164,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter610 : struct.tbl_names) + for (String _iter678 : struct.tbl_names) { - oprot.writeString(_iter610); + oprot.writeString(_iter678); } } } @@ -43530,13 +44182,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list611.size); - String _elem612; - for (int _i613 = 0; _i613 < _list611.size; ++_i613) + org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list679.size); + String _elem680; + for (int _i681 = 0; _i681 < _list679.size; ++_i681) { - _elem612 = 
iprot.readString(); - struct.tbl_names.add(_elem612); + _elem680 = iprot.readString(); + struct.tbl_names.add(_elem680); } } struct.setTbl_namesIsSet(true); @@ -44104,14 +44756,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list614 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list614.size); - Table _elem615; - for (int _i616 = 0; _i616 < _list614.size; ++_i616) + org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list682.size); + Table _elem683; + for (int _i684 = 0; _i684 < _list682.size; ++_i684) { - _elem615 = new Table(); - _elem615.read(iprot); - struct.success.add(_elem615); + _elem683 = new Table(); + _elem683.read(iprot); + struct.success.add(_elem683); } iprot.readListEnd(); } @@ -44164,9 +44816,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter617 : struct.success) + for (Table _iter685 : struct.success) { - _iter617.write(oprot); + _iter685.write(oprot); } oprot.writeListEnd(); } @@ -44221,9 +44873,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter618 : struct.success) + for (Table _iter686 : struct.success) { - _iter618.write(oprot); + _iter686.write(oprot); } } } @@ -44244,14 +44896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list619.size); - Table _elem620; - for (int _i621 = 0; _i621 < _list619.size; ++_i621) + org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list687.size); + Table _elem688; + for (int _i689 = 0; _i689 < _list687.size; ++_i689) { - _elem620 = new Table(); - _elem620.read(iprot); - struct.success.add(_elem620); + _elem688 = new Table(); + _elem688.read(iprot); + struct.success.add(_elem688); } } struct.setSuccessIsSet(true); @@ -45397,13 +46049,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list622 = iprot.readListBegin(); - struct.success = new ArrayList(_list622.size); - String _elem623; - for (int _i624 = 0; _i624 < _list622.size; ++_i624) + org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); + struct.success = new ArrayList(_list690.size); + String _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) { - _elem623 = iprot.readString(); - struct.success.add(_elem623); + _elem691 = iprot.readString(); + struct.success.add(_elem691); } iprot.readListEnd(); } @@ -45456,9 +46108,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter625 : struct.success) + for (String _iter693 : struct.success) { - oprot.writeString(_iter625); + oprot.writeString(_iter693); } oprot.writeListEnd(); } @@ -45513,9 +46165,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter626 : struct.success) + for (String _iter694 : struct.success) { - oprot.writeString(_iter626); + oprot.writeString(_iter694); } } } @@ -45536,13 +46188,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list627.size); - String _elem628; - for (int _i629 = 0; _i629 < _list627.size; ++_i629) + org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list695.size); + String _elem696; + for (int _i697 = 0; _i697 < _list695.size; ++_i697) { - _elem628 = iprot.readString(); - struct.success.add(_elem628); + _elem696 = iprot.readString(); + struct.success.add(_elem696); } } struct.setSuccessIsSet(true); @@ -51401,14 +52053,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list630 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list630.size); - Partition _elem631; - for (int _i632 = 0; _i632 < _list630.size; ++_i632) + org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list698.size); + Partition _elem699; + for (int _i700 = 0; _i700 < _list698.size; ++_i700) { - _elem631 = new Partition(); - _elem631.read(iprot); - struct.new_parts.add(_elem631); + _elem699 = new Partition(); + _elem699.read(iprot); + struct.new_parts.add(_elem699); } iprot.readListEnd(); } @@ -51434,9 +52086,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg 
oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter633 : struct.new_parts) + for (Partition _iter701 : struct.new_parts) { - _iter633.write(oprot); + _iter701.write(oprot); } oprot.writeListEnd(); } @@ -51467,9 +52119,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter634 : struct.new_parts) + for (Partition _iter702 : struct.new_parts) { - _iter634.write(oprot); + _iter702.write(oprot); } } } @@ -51481,14 +52133,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list635.size); - Partition _elem636; - for (int _i637 = 0; _i637 < _list635.size; ++_i637) + org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list703.size); + Partition _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem636 = new Partition(); - _elem636.read(iprot); - struct.new_parts.add(_elem636); + _elem704 = new Partition(); + _elem704.read(iprot); + struct.new_parts.add(_elem704); } } struct.setNew_partsIsSet(true); @@ -52489,14 +53141,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list638 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list638.size); - PartitionSpec _elem639; - for (int _i640 = 0; _i640 < _list638.size; ++_i640) + org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list706.size); + PartitionSpec _elem707; + for (int _i708 = 0; _i708 < _list706.size; ++_i708) { - _elem639 = new PartitionSpec(); - _elem639.read(iprot); - struct.new_parts.add(_elem639); + _elem707 = new PartitionSpec(); + _elem707.read(iprot); + struct.new_parts.add(_elem707); } iprot.readListEnd(); } @@ -52522,9 +53174,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter641 : struct.new_parts) + for (PartitionSpec _iter709 : struct.new_parts) { - _iter641.write(oprot); + _iter709.write(oprot); } oprot.writeListEnd(); } @@ -52555,9 +53207,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter642 : struct.new_parts) + for (PartitionSpec _iter710 : struct.new_parts) { - _iter642.write(oprot); + _iter710.write(oprot); } } } @@ -52569,14 +53221,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new 
ArrayList(_list643.size); - PartitionSpec _elem644; - for (int _i645 = 0; _i645 < _list643.size; ++_i645) + org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list711.size); + PartitionSpec _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem644 = new PartitionSpec(); - _elem644.read(iprot); - struct.new_parts.add(_elem644); + _elem712 = new PartitionSpec(); + _elem712.read(iprot); + struct.new_parts.add(_elem712); } } struct.setNew_partsIsSet(true); @@ -53752,13 +54404,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list646.size); - String _elem647; - for (int _i648 = 0; _i648 < _list646.size; ++_i648) + org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list714.size); + String _elem715; + for (int _i716 = 0; _i716 < _list714.size; ++_i716) { - _elem647 = iprot.readString(); - struct.part_vals.add(_elem647); + _elem715 = iprot.readString(); + struct.part_vals.add(_elem715); } iprot.readListEnd(); } @@ -53794,9 +54446,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter649 : struct.part_vals) + for (String _iter717 : struct.part_vals) { - oprot.writeString(_iter649); + oprot.writeString(_iter717); } oprot.writeListEnd(); } @@ -53839,9 +54491,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter650 : struct.part_vals) + for (String _iter718 : struct.part_vals) { - oprot.writeString(_iter650); + oprot.writeString(_iter718); } } } @@ -53861,13 +54513,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list651.size); - String _elem652; - for (int _i653 = 0; _i653 < _list651.size; ++_i653) + org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list719.size); + String _elem720; + for (int _i721 = 0; _i721 < _list719.size; ++_i721) { - _elem652 = iprot.readString(); - struct.part_vals.add(_elem652); + _elem720 = iprot.readString(); + struct.part_vals.add(_elem720); } } struct.setPart_valsIsSet(true); @@ -56176,13 +56828,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list654 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list654.size); - String _elem655; - for (int _i656 = 0; _i656 < _list654.size; ++_i656) + org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list722.size); + String _elem723; + for (int _i724 = 0; _i724 < _list722.size; ++_i724) { - 
_elem655 = iprot.readString(); - struct.part_vals.add(_elem655); + _elem723 = iprot.readString(); + struct.part_vals.add(_elem723); } iprot.readListEnd(); } @@ -56227,9 +56879,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter657 : struct.part_vals) + for (String _iter725 : struct.part_vals) { - oprot.writeString(_iter657); + oprot.writeString(_iter725); } oprot.writeListEnd(); } @@ -56280,9 +56932,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter658 : struct.part_vals) + for (String _iter726 : struct.part_vals) { - oprot.writeString(_iter658); + oprot.writeString(_iter726); } } } @@ -56305,13 +56957,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list659.size); - String _elem660; - for (int _i661 = 0; _i661 < _list659.size; ++_i661) + org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list727.size); + String _elem728; + for (int _i729 = 0; _i729 < _list727.size; ++_i729) { - _elem660 = iprot.readString(); - struct.part_vals.add(_elem660); + _elem728 = iprot.readString(); + struct.part_vals.add(_elem728); } } struct.setPart_valsIsSet(true); @@ -60181,13 +60833,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list662 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list662.size); - String _elem663; - for (int _i664 = 0; _i664 < _list662.size; ++_i664) + org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list730.size); + String _elem731; + for (int _i732 = 0; _i732 < _list730.size; ++_i732) { - _elem663 = iprot.readString(); - struct.part_vals.add(_elem663); + _elem731 = iprot.readString(); + struct.part_vals.add(_elem731); } iprot.readListEnd(); } @@ -60231,9 +60883,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter665 : struct.part_vals) + for (String _iter733 : struct.part_vals) { - oprot.writeString(_iter665); + oprot.writeString(_iter733); } oprot.writeListEnd(); } @@ -60282,9 +60934,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter666 : struct.part_vals) + for (String _iter734 : struct.part_vals) { - oprot.writeString(_iter666); + oprot.writeString(_iter734); } } } @@ -60307,13 +60959,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list667 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list667.size); - String _elem668; - for (int _i669 = 0; _i669 < _list667.size; ++_i669) + org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list735.size); + String _elem736; + for (int _i737 = 0; _i737 < _list735.size; ++_i737) { - _elem668 = iprot.readString(); - struct.part_vals.add(_elem668); + _elem736 = iprot.readString(); + struct.part_vals.add(_elem736); } } struct.setPart_valsIsSet(true); @@ -61552,13 +62204,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list670 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list670.size); - String _elem671; - for (int _i672 = 0; _i672 < _list670.size; ++_i672) + org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list738.size); + String _elem739; + for (int _i740 = 0; _i740 < _list738.size; ++_i740) { - _elem671 = iprot.readString(); - struct.part_vals.add(_elem671); + _elem739 = iprot.readString(); + struct.part_vals.add(_elem739); } iprot.readListEnd(); } @@ -61611,9 +62263,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter673 : struct.part_vals) + for (String _iter741 : struct.part_vals) { - oprot.writeString(_iter673); + oprot.writeString(_iter741); } oprot.writeListEnd(); } @@ -61670,9 +62322,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter674 : struct.part_vals) + for (String _iter742 : struct.part_vals) { - oprot.writeString(_iter674); + oprot.writeString(_iter742); } } } @@ -61698,13 +62350,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list675.size); - String _elem676; - for (int _i677 = 0; _i677 < _list675.size; ++_i677) + org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list743.size); + String _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem676 = iprot.readString(); - struct.part_vals.add(_elem676); + _elem744 = iprot.readString(); + struct.part_vals.add(_elem744); } } struct.setPart_valsIsSet(true); @@ -66306,13 +66958,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list678 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list678.size); - String _elem679; - for (int _i680 = 0; _i680 < _list678.size; ++_i680) + org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list746.size); + String _elem747; + for 
(int _i748 = 0; _i748 < _list746.size; ++_i748) { - _elem679 = iprot.readString(); - struct.part_vals.add(_elem679); + _elem747 = iprot.readString(); + struct.part_vals.add(_elem747); } iprot.readListEnd(); } @@ -66348,9 +67000,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter681 : struct.part_vals) + for (String _iter749 : struct.part_vals) { - oprot.writeString(_iter681); + oprot.writeString(_iter749); } oprot.writeListEnd(); } @@ -66393,9 +67045,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter682 : struct.part_vals) + for (String _iter750 : struct.part_vals) { - oprot.writeString(_iter682); + oprot.writeString(_iter750); } } } @@ -66415,13 +67067,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list683.size); - String _elem684; - for (int _i685 = 0; _i685 < _list683.size; ++_i685) + org.apache.thrift.protocol.TList _list751 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list751.size); + String _elem752; + for (int _i753 = 0; _i753 < _list751.size; ++_i753) { - _elem684 = iprot.readString(); - struct.part_vals.add(_elem684); + _elem752 = iprot.readString(); + struct.part_vals.add(_elem752); } } struct.setPart_valsIsSet(true); @@ -67639,15 +68291,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map686 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map686.size); - String _key687; - String _val688; - for (int _i689 = 0; _i689 < _map686.size; ++_i689) + org.apache.thrift.protocol.TMap _map754 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map754.size); + String _key755; + String _val756; + for (int _i757 = 0; _i757 < _map754.size; ++_i757) { - _key687 = iprot.readString(); - _val688 = iprot.readString(); - struct.partitionSpecs.put(_key687, _val688); + _key755 = iprot.readString(); + _val756 = iprot.readString(); + struct.partitionSpecs.put(_key755, _val756); } iprot.readMapEnd(); } @@ -67705,10 +68357,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter690 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter758 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter690.getKey()); - oprot.writeString(_iter690.getValue()); + oprot.writeString(_iter758.getKey()); + oprot.writeString(_iter758.getValue()); } oprot.writeMapEnd(); } @@ -67771,10 +68423,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for 
(Map.Entry _iter691 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter759 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter691.getKey()); - oprot.writeString(_iter691.getValue()); + oprot.writeString(_iter759.getKey()); + oprot.writeString(_iter759.getValue()); } } } @@ -67798,15 +68450,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map692 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map692.size); - String _key693; - String _val694; - for (int _i695 = 0; _i695 < _map692.size; ++_i695) + org.apache.thrift.protocol.TMap _map760 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map760.size); + String _key761; + String _val762; + for (int _i763 = 0; _i763 < _map760.size; ++_i763) { - _key693 = iprot.readString(); - _val694 = iprot.readString(); - struct.partitionSpecs.put(_key693, _val694); + _key761 = iprot.readString(); + _val762 = iprot.readString(); + struct.partitionSpecs.put(_key761, _val762); } } struct.setPartitionSpecsIsSet(true); @@ -69288,13 +69940,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list696 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list696.size); - String _elem697; - for (int _i698 = 0; _i698 < _list696.size; ++_i698) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list764.size); + String _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem697 = iprot.readString(); - struct.part_vals.add(_elem697); + _elem765 = iprot.readString(); + struct.part_vals.add(_elem765); } iprot.readListEnd(); } @@ -69314,13 +69966,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list699 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list699.size); - String _elem700; - for (int _i701 = 0; _i701 < _list699.size; ++_i701) + org.apache.thrift.protocol.TList _list767 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list767.size); + String _elem768; + for (int _i769 = 0; _i769 < _list767.size; ++_i769) { - _elem700 = iprot.readString(); - struct.group_names.add(_elem700); + _elem768 = iprot.readString(); + struct.group_names.add(_elem768); } iprot.readListEnd(); } @@ -69356,9 +70008,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter702 : struct.part_vals) + for (String _iter770 : struct.part_vals) { - oprot.writeString(_iter702); + oprot.writeString(_iter770); } oprot.writeListEnd(); } @@ -69373,9 +70025,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter703 : struct.group_names) + for (String _iter771 : struct.group_names) { - oprot.writeString(_iter703); + oprot.writeString(_iter771); } oprot.writeListEnd(); } @@ -69424,9 +70076,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter704 : struct.part_vals) + for (String _iter772 : struct.part_vals) { - oprot.writeString(_iter704); + oprot.writeString(_iter772); } } } @@ -69436,9 +70088,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter705 : struct.group_names) + for (String _iter773 : struct.group_names) { - oprot.writeString(_iter705); + oprot.writeString(_iter773); } } } @@ -69458,13 +70110,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list706 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list706.size); - String _elem707; - for (int _i708 = 0; _i708 < _list706.size; ++_i708) + org.apache.thrift.protocol.TList _list774 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list774.size); + String _elem775; + for (int _i776 = 0; _i776 < _list774.size; ++_i776) { - _elem707 = iprot.readString(); - struct.part_vals.add(_elem707); + _elem775 = iprot.readString(); + struct.part_vals.add(_elem775); } } struct.setPart_valsIsSet(true); @@ -69475,13 +70127,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list709 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list709.size); - String _elem710; - for (int _i711 = 0; _i711 < _list709.size; ++_i711) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list777.size); + String _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem710 = iprot.readString(); - struct.group_names.add(_elem710); + _elem778 = iprot.readString(); + struct.group_names.add(_elem778); } } struct.setGroup_namesIsSet(true); @@ -72250,14 +72902,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list712 = iprot.readListBegin(); - struct.success = new ArrayList(_list712.size); - Partition _elem713; - for (int _i714 = 0; _i714 < _list712.size; ++_i714) + org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); + struct.success = new ArrayList(_list780.size); + Partition _elem781; + for (int _i782 = 0; _i782 < _list780.size; ++_i782) { - _elem713 = new Partition(); - _elem713.read(iprot); - struct.success.add(_elem713); + _elem781 = new Partition(); + _elem781.read(iprot); + struct.success.add(_elem781); } iprot.readListEnd(); } @@ -72301,9 +72953,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter715 : struct.success) + for (Partition _iter783 : struct.success) { - _iter715.write(oprot); + _iter783.write(oprot); } oprot.writeListEnd(); } @@ -72350,9 +73002,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter716 : struct.success) + for (Partition _iter784 : struct.success) { - _iter716.write(oprot); + _iter784.write(oprot); } } } @@ -72370,14 +73022,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list717 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list717.size); - Partition _elem718; - for (int _i719 = 0; _i719 < _list717.size; ++_i719) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list785.size); + Partition _elem786; + for (int _i787 = 0; _i787 < _list785.size; ++_i787) { - _elem718 = new Partition(); - _elem718.read(iprot); - struct.success.add(_elem718); + _elem786 = new Partition(); + _elem786.read(iprot); + struct.success.add(_elem786); } } struct.setSuccessIsSet(true); @@ -73067,13 +73719,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list720 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list720.size); - String _elem721; - for (int _i722 = 0; _i722 < _list720.size; ++_i722) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list788.size); + String _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem721 = iprot.readString(); - struct.group_names.add(_elem721); + _elem789 = iprot.readString(); + struct.group_names.add(_elem789); } iprot.readListEnd(); } @@ -73117,9 +73769,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter723 : struct.group_names) + for (String _iter791 : struct.group_names) { - oprot.writeString(_iter723); + oprot.writeString(_iter791); } oprot.writeListEnd(); } @@ -73174,9 +73826,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter724 : struct.group_names) + for (String _iter792 : struct.group_names) { - oprot.writeString(_iter724); + oprot.writeString(_iter792); } } } @@ -73204,13 +73856,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list725 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list725.size); - String _elem726; - for (int _i727 = 0; _i727 < _list725.size; ++_i727) + 
org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list793.size); + String _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem726 = iprot.readString(); - struct.group_names.add(_elem726); + _elem794 = iprot.readString(); + struct.group_names.add(_elem794); } } struct.setGroup_namesIsSet(true); @@ -73697,14 +74349,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list728 = iprot.readListBegin(); - struct.success = new ArrayList(_list728.size); - Partition _elem729; - for (int _i730 = 0; _i730 < _list728.size; ++_i730) + org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); + struct.success = new ArrayList(_list796.size); + Partition _elem797; + for (int _i798 = 0; _i798 < _list796.size; ++_i798) { - _elem729 = new Partition(); - _elem729.read(iprot); - struct.success.add(_elem729); + _elem797 = new Partition(); + _elem797.read(iprot); + struct.success.add(_elem797); } iprot.readListEnd(); } @@ -73748,9 +74400,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter731 : struct.success) + for (Partition _iter799 : struct.success) { - _iter731.write(oprot); + _iter799.write(oprot); } oprot.writeListEnd(); } @@ -73797,9 +74449,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter732 : struct.success) + for (Partition _iter800 : struct.success) { - _iter732.write(oprot); + _iter800.write(oprot); } } } @@ -73817,14 +74469,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list733 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list733.size); - Partition _elem734; - for (int _i735 = 0; _i735 < _list733.size; ++_i735) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list801.size); + Partition _elem802; + for (int _i803 = 0; _i803 < _list801.size; ++_i803) { - _elem734 = new Partition(); - _elem734.read(iprot); - struct.success.add(_elem734); + _elem802 = new Partition(); + _elem802.read(iprot); + struct.success.add(_elem802); } } struct.setSuccessIsSet(true); @@ -74887,14 +75539,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list736 = iprot.readListBegin(); - struct.success = new ArrayList(_list736.size); - PartitionSpec _elem737; - for (int _i738 = 0; _i738 < _list736.size; ++_i738) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.success = new ArrayList(_list804.size); + PartitionSpec _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - _elem737 = new PartitionSpec(); - _elem737.read(iprot); - 
struct.success.add(_elem737); + _elem805 = new PartitionSpec(); + _elem805.read(iprot); + struct.success.add(_elem805); } iprot.readListEnd(); } @@ -74938,9 +75590,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter739 : struct.success) + for (PartitionSpec _iter807 : struct.success) { - _iter739.write(oprot); + _iter807.write(oprot); } oprot.writeListEnd(); } @@ -74987,9 +75639,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter740 : struct.success) + for (PartitionSpec _iter808 : struct.success) { - _iter740.write(oprot); + _iter808.write(oprot); } } } @@ -75007,14 +75659,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list741 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list741.size); - PartitionSpec _elem742; - for (int _i743 = 0; _i743 < _list741.size; ++_i743) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list809.size); + PartitionSpec _elem810; + for (int _i811 = 0; _i811 < _list809.size; ++_i811) { - _elem742 = new PartitionSpec(); - _elem742.read(iprot); - struct.success.add(_elem742); + _elem810 = new PartitionSpec(); + _elem810.read(iprot); + struct.success.add(_elem810); } } struct.setSuccessIsSet(true); @@ -75993,13 +76645,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list744 = iprot.readListBegin(); - struct.success = new ArrayList(_list744.size); - String _elem745; - for (int _i746 = 0; _i746 < _list744.size; ++_i746) + org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); + struct.success = new ArrayList(_list812.size); + String _elem813; + for (int _i814 = 0; _i814 < _list812.size; ++_i814) { - _elem745 = iprot.readString(); - struct.success.add(_elem745); + _elem813 = iprot.readString(); + struct.success.add(_elem813); } iprot.readListEnd(); } @@ -76034,9 +76686,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter747 : struct.success) + for (String _iter815 : struct.success) { - oprot.writeString(_iter747); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -76075,9 +76727,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter748 : struct.success) + for (String _iter816 : struct.success) { - oprot.writeString(_iter748); + oprot.writeString(_iter816); } } } @@ -76092,13 +76744,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list749 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list749.size); - String _elem750; - for (int _i751 = 0; _i751 < _list749.size; ++_i751) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list817.size); + String _elem818; + for (int _i819 = 0; _i819 < _list817.size; ++_i819) { - _elem750 = iprot.readString(); - struct.success.add(_elem750); + _elem818 = iprot.readString(); + struct.success.add(_elem818); } } struct.setSuccessIsSet(true); @@ -76686,13 +77338,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list752 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list752.size); - String _elem753; - for (int _i754 = 0; _i754 < _list752.size; ++_i754) + org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list820.size); + String _elem821; + for (int _i822 = 0; _i822 < _list820.size; ++_i822) { - _elem753 = iprot.readString(); - struct.part_vals.add(_elem753); + _elem821 = iprot.readString(); + struct.part_vals.add(_elem821); } iprot.readListEnd(); } @@ -76736,9 +77388,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter755 : struct.part_vals) + for (String _iter823 : struct.part_vals) { - oprot.writeString(_iter755); + oprot.writeString(_iter823); } oprot.writeListEnd(); } @@ -76787,9 +77439,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter756 : struct.part_vals) + for (String _iter824 : struct.part_vals) { - oprot.writeString(_iter756); + oprot.writeString(_iter824); } } } @@ -76812,13 +77464,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list757 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list757.size); - String _elem758; - for (int _i759 = 0; _i759 < _list757.size; ++_i759) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem758 = iprot.readString(); - struct.part_vals.add(_elem758); + _elem826 = iprot.readString(); + struct.part_vals.add(_elem826); } } struct.setPart_valsIsSet(true); @@ -77309,14 +77961,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list760 = iprot.readListBegin(); - struct.success = new ArrayList(_list760.size); - Partition _elem761; - for (int _i762 = 0; _i762 < _list760.size; ++_i762) + org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); + struct.success = new 
ArrayList(_list828.size); + Partition _elem829; + for (int _i830 = 0; _i830 < _list828.size; ++_i830) { - _elem761 = new Partition(); - _elem761.read(iprot); - struct.success.add(_elem761); + _elem829 = new Partition(); + _elem829.read(iprot); + struct.success.add(_elem829); } iprot.readListEnd(); } @@ -77360,9 +78012,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter763 : struct.success) + for (Partition _iter831 : struct.success) { - _iter763.write(oprot); + _iter831.write(oprot); } oprot.writeListEnd(); } @@ -77409,9 +78061,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter764 : struct.success) + for (Partition _iter832 : struct.success) { - _iter764.write(oprot); + _iter832.write(oprot); } } } @@ -77429,14 +78081,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list765 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list765.size); - Partition _elem766; - for (int _i767 = 0; _i767 < _list765.size; ++_i767) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list833.size); + Partition _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem766 = new Partition(); - _elem766.read(iprot); - struct.success.add(_elem766); + _elem834 = new Partition(); + _elem834.read(iprot); + struct.success.add(_elem834); } } struct.setSuccessIsSet(true); @@ -78208,13 +78860,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list768 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list768.size); - String _elem769; - for (int _i770 = 0; _i770 < _list768.size; ++_i770) + org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list836.size); + String _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem769 = iprot.readString(); - struct.part_vals.add(_elem769); + _elem837 = iprot.readString(); + struct.part_vals.add(_elem837); } iprot.readListEnd(); } @@ -78242,13 +78894,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list771 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list771.size); - String _elem772; - for (int _i773 = 0; _i773 < _list771.size; ++_i773) + org.apache.thrift.protocol.TList _list839 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list839.size); + String _elem840; + for (int _i841 = 0; _i841 < _list839.size; ++_i841) { - _elem772 = iprot.readString(); - struct.group_names.add(_elem772); + _elem840 = iprot.readString(); + struct.group_names.add(_elem840); } iprot.readListEnd(); } @@ -78284,9 +78936,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter774 : struct.part_vals) + for (String _iter842 : struct.part_vals) { - oprot.writeString(_iter774); + oprot.writeString(_iter842); } oprot.writeListEnd(); } @@ -78304,9 +78956,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter775 : struct.group_names) + for (String _iter843 : struct.group_names) { - oprot.writeString(_iter775); + oprot.writeString(_iter843); } oprot.writeListEnd(); } @@ -78358,9 +79010,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter776 : struct.part_vals) + for (String _iter844 : struct.part_vals) { - oprot.writeString(_iter776); + oprot.writeString(_iter844); } } } @@ -78373,9 +79025,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter777 : struct.group_names) + for (String _iter845 : struct.group_names) { - oprot.writeString(_iter777); + oprot.writeString(_iter845); } } } @@ -78395,13 +79047,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list778 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list778.size); - String _elem779; - for (int _i780 = 0; _i780 < _list778.size; ++_i780) + org.apache.thrift.protocol.TList _list846 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list846.size); + String _elem847; + for (int _i848 = 0; _i848 < _list846.size; ++_i848) { - _elem779 = iprot.readString(); - struct.part_vals.add(_elem779); + _elem847 = iprot.readString(); + struct.part_vals.add(_elem847); } } struct.setPart_valsIsSet(true); @@ -78416,13 +79068,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list781 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list781.size); - String _elem782; - for (int _i783 = 0; _i783 < _list781.size; ++_i783) + org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list849.size); + String _elem850; + for (int _i851 = 0; _i851 < _list849.size; ++_i851) { - _elem782 = iprot.readString(); - struct.group_names.add(_elem782); + _elem850 = iprot.readString(); + struct.group_names.add(_elem850); } } struct.setGroup_namesIsSet(true); @@ -78909,14 +79561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list784 = iprot.readListBegin(); - struct.success = new ArrayList(_list784.size); - Partition _elem785; - for (int _i786 = 0; _i786 
< _list784.size; ++_i786) + org.apache.thrift.protocol.TList _list852 = iprot.readListBegin(); + struct.success = new ArrayList(_list852.size); + Partition _elem853; + for (int _i854 = 0; _i854 < _list852.size; ++_i854) { - _elem785 = new Partition(); - _elem785.read(iprot); - struct.success.add(_elem785); + _elem853 = new Partition(); + _elem853.read(iprot); + struct.success.add(_elem853); } iprot.readListEnd(); } @@ -78960,9 +79612,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter787 : struct.success) + for (Partition _iter855 : struct.success) { - _iter787.write(oprot); + _iter855.write(oprot); } oprot.writeListEnd(); } @@ -79009,9 +79661,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter788 : struct.success) + for (Partition _iter856 : struct.success) { - _iter788.write(oprot); + _iter856.write(oprot); } } } @@ -79029,14 +79681,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list789 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list789.size); - Partition _elem790; - for (int _i791 = 0; _i791 < _list789.size; ++_i791) + org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list857.size); + Partition _elem858; + for (int _i859 = 0; _i859 < _list857.size; ++_i859) { - _elem790 = new Partition(); - _elem790.read(iprot); - struct.success.add(_elem790); + _elem858 = new Partition(); + _elem858.read(iprot); + struct.success.add(_elem858); } } struct.setSuccessIsSet(true); @@ -79629,13 +80281,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list792 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list792.size); - String _elem793; - for (int _i794 = 0; _i794 < _list792.size; ++_i794) + org.apache.thrift.protocol.TList _list860 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list860.size); + String _elem861; + for (int _i862 = 0; _i862 < _list860.size; ++_i862) { - _elem793 = iprot.readString(); - struct.part_vals.add(_elem793); + _elem861 = iprot.readString(); + struct.part_vals.add(_elem861); } iprot.readListEnd(); } @@ -79679,9 +80331,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter795 : struct.part_vals) + for (String _iter863 : struct.part_vals) { - oprot.writeString(_iter795); + oprot.writeString(_iter863); } oprot.writeListEnd(); } @@ -79730,9 +80382,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter796 : struct.part_vals) + for (String _iter864 : struct.part_vals) 
{ - oprot.writeString(_iter796); + oprot.writeString(_iter864); } } } @@ -79755,13 +80407,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list797 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list797.size); - String _elem798; - for (int _i799 = 0; _i799 < _list797.size; ++_i799) + org.apache.thrift.protocol.TList _list865 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list865.size); + String _elem866; + for (int _i867 = 0; _i867 < _list865.size; ++_i867) { - _elem798 = iprot.readString(); - struct.part_vals.add(_elem798); + _elem866 = iprot.readString(); + struct.part_vals.add(_elem866); } } struct.setPart_valsIsSet(true); @@ -80249,13 +80901,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); - struct.success = new ArrayList(_list800.size); - String _elem801; - for (int _i802 = 0; _i802 < _list800.size; ++_i802) + org.apache.thrift.protocol.TList _list868 = iprot.readListBegin(); + struct.success = new ArrayList(_list868.size); + String _elem869; + for (int _i870 = 0; _i870 < _list868.size; ++_i870) { - _elem801 = iprot.readString(); - struct.success.add(_elem801); + _elem869 = iprot.readString(); + struct.success.add(_elem869); } iprot.readListEnd(); } @@ -80299,9 +80951,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter803 : struct.success) + for (String _iter871 : struct.success) { - oprot.writeString(_iter803); + oprot.writeString(_iter871); } oprot.writeListEnd(); } @@ -80348,9 +81000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter804 : struct.success) + for (String _iter872 : struct.success) { - oprot.writeString(_iter804); + oprot.writeString(_iter872); } } } @@ -80368,13 +81020,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list805 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list805.size); - String _elem806; - for (int _i807 = 0; _i807 < _list805.size; ++_i807) + org.apache.thrift.protocol.TList _list873 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list873.size); + String _elem874; + for (int _i875 = 0; _i875 < _list873.size; ++_i875) { - _elem806 = iprot.readString(); - struct.success.add(_elem806); + _elem874 = iprot.readString(); + struct.success.add(_elem874); } } struct.setSuccessIsSet(true); @@ -81541,14 +82193,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list808 = iprot.readListBegin(); - 
struct.success = new ArrayList(_list808.size); - Partition _elem809; - for (int _i810 = 0; _i810 < _list808.size; ++_i810) + org.apache.thrift.protocol.TList _list876 = iprot.readListBegin(); + struct.success = new ArrayList(_list876.size); + Partition _elem877; + for (int _i878 = 0; _i878 < _list876.size; ++_i878) { - _elem809 = new Partition(); - _elem809.read(iprot); - struct.success.add(_elem809); + _elem877 = new Partition(); + _elem877.read(iprot); + struct.success.add(_elem877); } iprot.readListEnd(); } @@ -81592,9 +82244,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter811 : struct.success) + for (Partition _iter879 : struct.success) { - _iter811.write(oprot); + _iter879.write(oprot); } oprot.writeListEnd(); } @@ -81641,9 +82293,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter812 : struct.success) + for (Partition _iter880 : struct.success) { - _iter812.write(oprot); + _iter880.write(oprot); } } } @@ -81661,14 +82313,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list813 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list813.size); - Partition _elem814; - for (int _i815 = 0; _i815 < _list813.size; ++_i815) + org.apache.thrift.protocol.TList _list881 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list881.size); + Partition _elem882; + for (int _i883 = 0; _i883 < _list881.size; ++_i883) { - _elem814 = new Partition(); - _elem814.read(iprot); - struct.success.add(_elem814); + _elem882 = new Partition(); + _elem882.read(iprot); + struct.success.add(_elem882); } } struct.setSuccessIsSet(true); @@ -82835,14 +83487,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list816 = iprot.readListBegin(); - struct.success = new ArrayList(_list816.size); - PartitionSpec _elem817; - for (int _i818 = 0; _i818 < _list816.size; ++_i818) + org.apache.thrift.protocol.TList _list884 = iprot.readListBegin(); + struct.success = new ArrayList(_list884.size); + PartitionSpec _elem885; + for (int _i886 = 0; _i886 < _list884.size; ++_i886) { - _elem817 = new PartitionSpec(); - _elem817.read(iprot); - struct.success.add(_elem817); + _elem885 = new PartitionSpec(); + _elem885.read(iprot); + struct.success.add(_elem885); } iprot.readListEnd(); } @@ -82886,9 +83538,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter819 : struct.success) + for (PartitionSpec _iter887 : struct.success) { - _iter819.write(oprot); + _iter887.write(oprot); } oprot.writeListEnd(); } @@ -82935,9 +83587,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if 
(struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter820 : struct.success) + for (PartitionSpec _iter888 : struct.success) { - _iter820.write(oprot); + _iter888.write(oprot); } } } @@ -82955,14 +83607,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list821 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list821.size); - PartitionSpec _elem822; - for (int _i823 = 0; _i823 < _list821.size; ++_i823) + org.apache.thrift.protocol.TList _list889 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list889.size); + PartitionSpec _elem890; + for (int _i891 = 0; _i891 < _list889.size; ++_i891) { - _elem822 = new PartitionSpec(); - _elem822.read(iprot); - struct.success.add(_elem822); + _elem890 = new PartitionSpec(); + _elem890.read(iprot); + struct.success.add(_elem890); } } struct.setSuccessIsSet(true); @@ -84410,13 +85062,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list824 = iprot.readListBegin(); - struct.names = new ArrayList(_list824.size); - String _elem825; - for (int _i826 = 0; _i826 < _list824.size; ++_i826) + org.apache.thrift.protocol.TList _list892 = iprot.readListBegin(); + struct.names = new ArrayList(_list892.size); + String _elem893; + for (int _i894 = 0; _i894 < _list892.size; ++_i894) { - _elem825 = iprot.readString(); - struct.names.add(_elem825); + _elem893 = iprot.readString(); + struct.names.add(_elem893); } iprot.readListEnd(); } @@ -84452,9 +85104,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter827 : struct.names) + for (String _iter895 : struct.names) { - oprot.writeString(_iter827); + oprot.writeString(_iter895); } oprot.writeListEnd(); } @@ -84497,9 +85149,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter828 : struct.names) + for (String _iter896 : struct.names) { - oprot.writeString(_iter828); + oprot.writeString(_iter896); } } } @@ -84519,13 +85171,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list829 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list829.size); - String _elem830; - for (int _i831 = 0; _i831 < _list829.size; ++_i831) + org.apache.thrift.protocol.TList _list897 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list897.size); + String _elem898; + for (int _i899 = 0; _i899 < _list897.size; ++_i899) { - _elem830 = iprot.readString(); - struct.names.add(_elem830); + _elem898 = iprot.readString(); + struct.names.add(_elem898); } } struct.setNamesIsSet(true); @@ -85012,14 +85664,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n 
case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list832 = iprot.readListBegin(); - struct.success = new ArrayList(_list832.size); - Partition _elem833; - for (int _i834 = 0; _i834 < _list832.size; ++_i834) + org.apache.thrift.protocol.TList _list900 = iprot.readListBegin(); + struct.success = new ArrayList(_list900.size); + Partition _elem901; + for (int _i902 = 0; _i902 < _list900.size; ++_i902) { - _elem833 = new Partition(); - _elem833.read(iprot); - struct.success.add(_elem833); + _elem901 = new Partition(); + _elem901.read(iprot); + struct.success.add(_elem901); } iprot.readListEnd(); } @@ -85063,9 +85715,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter835 : struct.success) + for (Partition _iter903 : struct.success) { - _iter835.write(oprot); + _iter903.write(oprot); } oprot.writeListEnd(); } @@ -85112,9 +85764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter836 : struct.success) + for (Partition _iter904 : struct.success) { - _iter836.write(oprot); + _iter904.write(oprot); } } } @@ -85132,14 +85784,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list837 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list837.size); - Partition _elem838; - for (int _i839 = 0; _i839 < _list837.size; ++_i839) + org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list905.size); + Partition _elem906; + for (int _i907 = 0; _i907 < _list905.size; ++_i907) { - _elem838 = new Partition(); - _elem838.read(iprot); - struct.success.add(_elem838); + _elem906 = new Partition(); + _elem906.read(iprot); + struct.success.add(_elem906); } } struct.setSuccessIsSet(true); @@ -86689,14 +87341,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list840 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list840.size); - Partition _elem841; - for (int _i842 = 0; _i842 < _list840.size; ++_i842) + org.apache.thrift.protocol.TList _list908 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list908.size); + Partition _elem909; + for (int _i910 = 0; _i910 < _list908.size; ++_i910) { - _elem841 = new Partition(); - _elem841.read(iprot); - struct.new_parts.add(_elem841); + _elem909 = new Partition(); + _elem909.read(iprot); + struct.new_parts.add(_elem909); } iprot.readListEnd(); } @@ -86732,9 +87384,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter843 : struct.new_parts) + for (Partition _iter911 : struct.new_parts) { - _iter843.write(oprot); + 
_iter911.write(oprot); } oprot.writeListEnd(); } @@ -86777,9 +87429,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter844 : struct.new_parts) + for (Partition _iter912 : struct.new_parts) { - _iter844.write(oprot); + _iter912.write(oprot); } } } @@ -86799,14 +87451,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list845.size); - Partition _elem846; - for (int _i847 = 0; _i847 < _list845.size; ++_i847) + org.apache.thrift.protocol.TList _list913 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list913.size); + Partition _elem914; + for (int _i915 = 0; _i915 < _list913.size; ++_i915) { - _elem846 = new Partition(); - _elem846.read(iprot); - struct.new_parts.add(_elem846); + _elem914 = new Partition(); + _elem914.read(iprot); + struct.new_parts.add(_elem914); } } struct.setNew_partsIsSet(true); @@ -89002,13 +89654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list848.size); - String _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list916 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list916.size); + String _elem917; + for (int _i918 = 0; _i918 < _list916.size; ++_i918) { - _elem849 = iprot.readString(); - struct.part_vals.add(_elem849); + _elem917 = iprot.readString(); + struct.part_vals.add(_elem917); } iprot.readListEnd(); } @@ -89053,9 +89705,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter851 : struct.part_vals) + for (String _iter919 : struct.part_vals) { - oprot.writeString(_iter851); + oprot.writeString(_iter919); } oprot.writeListEnd(); } @@ -89106,9 +89758,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter852 : struct.part_vals) + for (String _iter920 : struct.part_vals) { - oprot.writeString(_iter852); + oprot.writeString(_iter920); } } } @@ -89131,13 +89783,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list853.size); - String _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list921 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list921.size); + String _elem922; + for (int _i923 = 0; _i923 < _list921.size; ++_i923) { - _elem854 = iprot.readString(); - struct.part_vals.add(_elem854); + _elem922 = 
iprot.readString(); + struct.part_vals.add(_elem922); } } struct.setPart_valsIsSet(true); @@ -90011,13 +90663,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list856.size); - String _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list924 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list924.size); + String _elem925; + for (int _i926 = 0; _i926 < _list924.size; ++_i926) { - _elem857 = iprot.readString(); - struct.part_vals.add(_elem857); + _elem925 = iprot.readString(); + struct.part_vals.add(_elem925); } iprot.readListEnd(); } @@ -90051,9 +90703,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter859 : struct.part_vals) + for (String _iter927 : struct.part_vals) { - oprot.writeString(_iter859); + oprot.writeString(_iter927); } oprot.writeListEnd(); } @@ -90090,9 +90742,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter860 : struct.part_vals) + for (String _iter928 : struct.part_vals) { - oprot.writeString(_iter860); + oprot.writeString(_iter928); } } } @@ -90107,13 +90759,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list861.size); - String _elem862; - for (int _i863 = 0; _i863 < _list861.size; ++_i863) + org.apache.thrift.protocol.TList _list929 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list929.size); + String _elem930; + for (int _i931 = 0; _i931 < _list929.size; ++_i931) { - _elem862 = iprot.readString(); - struct.part_vals.add(_elem862); + _elem930 = iprot.readString(); + struct.part_vals.add(_elem930); } } struct.setPart_valsIsSet(true); @@ -92268,13 +92920,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.success = new ArrayList(_list864.size); - String _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list932 = iprot.readListBegin(); + struct.success = new ArrayList(_list932.size); + String _elem933; + for (int _i934 = 0; _i934 < _list932.size; ++_i934) { - _elem865 = iprot.readString(); - struct.success.add(_elem865); + _elem933 = iprot.readString(); + struct.success.add(_elem933); } iprot.readListEnd(); } @@ -92309,9 +92961,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter867 : struct.success) + for 
(String _iter935 : struct.success) { - oprot.writeString(_iter867); + oprot.writeString(_iter935); } oprot.writeListEnd(); } @@ -92350,9 +93002,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter868 : struct.success) + for (String _iter936 : struct.success) { - oprot.writeString(_iter868); + oprot.writeString(_iter936); } } } @@ -92367,13 +93019,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list869.size); - String _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list937 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list937.size); + String _elem938; + for (int _i939 = 0; _i939 < _list937.size; ++_i939) { - _elem870 = iprot.readString(); - struct.success.add(_elem870); + _elem938 = iprot.readString(); + struct.success.add(_elem938); } } struct.setSuccessIsSet(true); @@ -93136,15 +93788,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map872 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map872.size); - String _key873; - String _val874; - for (int _i875 = 0; _i875 < _map872.size; ++_i875) + org.apache.thrift.protocol.TMap _map940 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map940.size); + String _key941; + String _val942; + for (int _i943 = 0; _i943 < _map940.size; ++_i943) { - _key873 = iprot.readString(); - _val874 = iprot.readString(); - struct.success.put(_key873, _val874); + _key941 = iprot.readString(); + _val942 = iprot.readString(); + struct.success.put(_key941, _val942); } iprot.readMapEnd(); } @@ -93179,10 +93831,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter876 : struct.success.entrySet()) + for (Map.Entry _iter944 : struct.success.entrySet()) { - oprot.writeString(_iter876.getKey()); - oprot.writeString(_iter876.getValue()); + oprot.writeString(_iter944.getKey()); + oprot.writeString(_iter944.getValue()); } oprot.writeMapEnd(); } @@ -93221,10 +93873,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter877 : struct.success.entrySet()) + for (Map.Entry _iter945 : struct.success.entrySet()) { - oprot.writeString(_iter877.getKey()); - oprot.writeString(_iter877.getValue()); + oprot.writeString(_iter945.getKey()); + oprot.writeString(_iter945.getValue()); } } } @@ -93239,15 +93891,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map878 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map878.size); - String _key879; - String _val880; - for (int _i881 = 0; _i881 < _map878.size; ++_i881) + org.apache.thrift.protocol.TMap _map946 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map946.size); + String _key947; + String _val948; + for (int _i949 = 0; _i949 < _map946.size; ++_i949) { - _key879 = iprot.readString(); - _val880 = iprot.readString(); - struct.success.put(_key879, _val880); + _key947 = iprot.readString(); + _val948 = iprot.readString(); + struct.success.put(_key947, _val948); } } struct.setSuccessIsSet(true); @@ -93842,15 +94494,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map882 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map882.size); - String _key883; - String _val884; - for (int _i885 = 0; _i885 < _map882.size; ++_i885) + org.apache.thrift.protocol.TMap _map950 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map950.size); + String _key951; + String _val952; + for (int _i953 = 0; _i953 < _map950.size; ++_i953) { - _key883 = iprot.readString(); - _val884 = iprot.readString(); - struct.part_vals.put(_key883, _val884); + _key951 = iprot.readString(); + _val952 = iprot.readString(); + struct.part_vals.put(_key951, _val952); } iprot.readMapEnd(); } @@ -93894,10 +94546,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter886 : struct.part_vals.entrySet()) + for (Map.Entry _iter954 : struct.part_vals.entrySet()) { - oprot.writeString(_iter886.getKey()); - oprot.writeString(_iter886.getValue()); + oprot.writeString(_iter954.getKey()); + oprot.writeString(_iter954.getValue()); } oprot.writeMapEnd(); } @@ -93948,10 +94600,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter887 : struct.part_vals.entrySet()) + for (Map.Entry _iter955 : struct.part_vals.entrySet()) { - oprot.writeString(_iter887.getKey()); - oprot.writeString(_iter887.getValue()); + oprot.writeString(_iter955.getKey()); + oprot.writeString(_iter955.getValue()); } } } @@ -93974,15 +94626,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map888 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map888.size); - String _key889; - String _val890; - for (int _i891 = 0; _i891 < _map888.size; ++_i891) + org.apache.thrift.protocol.TMap _map956 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map956.size); + String _key957; + String _val958; + for (int _i959 = 0; _i959 < _map956.size; ++_i959) { - _key889 = iprot.readString(); - _val890 = iprot.readString(); - 
struct.part_vals.put(_key889, _val890); + _key957 = iprot.readString(); + _val958 = iprot.readString(); + struct.part_vals.put(_key957, _val958); } } struct.setPart_valsIsSet(true); @@ -95466,15 +96118,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map892 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map892.size); - String _key893; - String _val894; - for (int _i895 = 0; _i895 < _map892.size; ++_i895) + org.apache.thrift.protocol.TMap _map960 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map960.size); + String _key961; + String _val962; + for (int _i963 = 0; _i963 < _map960.size; ++_i963) { - _key893 = iprot.readString(); - _val894 = iprot.readString(); - struct.part_vals.put(_key893, _val894); + _key961 = iprot.readString(); + _val962 = iprot.readString(); + struct.part_vals.put(_key961, _val962); } iprot.readMapEnd(); } @@ -95518,10 +96170,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter896 : struct.part_vals.entrySet()) + for (Map.Entry _iter964 : struct.part_vals.entrySet()) { - oprot.writeString(_iter896.getKey()); - oprot.writeString(_iter896.getValue()); + oprot.writeString(_iter964.getKey()); + oprot.writeString(_iter964.getValue()); } oprot.writeMapEnd(); } @@ -95572,10 +96224,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter897 : struct.part_vals.entrySet()) + for (Map.Entry _iter965 : struct.part_vals.entrySet()) { - oprot.writeString(_iter897.getKey()); - oprot.writeString(_iter897.getValue()); + oprot.writeString(_iter965.getKey()); + oprot.writeString(_iter965.getValue()); } } } @@ -95598,15 +96250,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map898 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map898.size); - String _key899; - String _val900; - for (int _i901 = 0; _i901 < _map898.size; ++_i901) + org.apache.thrift.protocol.TMap _map966 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map966.size); + String _key967; + String _val968; + for (int _i969 = 0; _i969 < _map966.size; ++_i969) { - _key899 = iprot.readString(); - _val900 = iprot.readString(); - struct.part_vals.put(_key899, _val900); + _key967 = iprot.readString(); + _val968 = iprot.readString(); + struct.part_vals.put(_key967, _val968); } } struct.setPart_valsIsSet(true); @@ -102330,14 +102982,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list902 = iprot.readListBegin(); - struct.success = new ArrayList(_list902.size); - Index _elem903; - for (int _i904 = 0; _i904 < _list902.size; ++_i904) + 
org.apache.thrift.protocol.TList _list970 = iprot.readListBegin(); + struct.success = new ArrayList(_list970.size); + Index _elem971; + for (int _i972 = 0; _i972 < _list970.size; ++_i972) { - _elem903 = new Index(); - _elem903.read(iprot); - struct.success.add(_elem903); + _elem971 = new Index(); + _elem971.read(iprot); + struct.success.add(_elem971); } iprot.readListEnd(); } @@ -102381,9 +103033,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter905 : struct.success) + for (Index _iter973 : struct.success) { - _iter905.write(oprot); + _iter973.write(oprot); } oprot.writeListEnd(); } @@ -102430,9 +103082,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter906 : struct.success) + for (Index _iter974 : struct.success) { - _iter906.write(oprot); + _iter974.write(oprot); } } } @@ -102450,14 +103102,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list907 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list907.size); - Index _elem908; - for (int _i909 = 0; _i909 < _list907.size; ++_i909) + org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list975.size); + Index _elem976; + for (int _i977 = 0; _i977 < _list975.size; ++_i977) { - _elem908 = new Index(); - _elem908.read(iprot); - struct.success.add(_elem908); + _elem976 = new Index(); + _elem976.read(iprot); + struct.success.add(_elem976); } } struct.setSuccessIsSet(true); @@ -103436,13 +104088,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list910 = iprot.readListBegin(); - struct.success = new ArrayList(_list910.size); - String _elem911; - for (int _i912 = 0; _i912 < _list910.size; ++_i912) + org.apache.thrift.protocol.TList _list978 = iprot.readListBegin(); + struct.success = new ArrayList(_list978.size); + String _elem979; + for (int _i980 = 0; _i980 < _list978.size; ++_i980) { - _elem911 = iprot.readString(); - struct.success.add(_elem911); + _elem979 = iprot.readString(); + struct.success.add(_elem979); } iprot.readListEnd(); } @@ -103477,9 +104129,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter913 : struct.success) + for (String _iter981 : struct.success) { - oprot.writeString(_iter913); + oprot.writeString(_iter981); } oprot.writeListEnd(); } @@ -103518,9 +104170,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter914 : struct.success) + for (String _iter982 : struct.success) { - oprot.writeString(_iter914); + oprot.writeString(_iter982); } } } @@ -103535,13 
+104187,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list915 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list915.size); - String _elem916; - for (int _i917 = 0; _i917 < _list915.size; ++_i917) + org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list983.size); + String _elem984; + for (int _i985 = 0; _i985 < _list983.size; ++_i985) { - _elem916 = iprot.readString(); - struct.success.add(_elem916); + _elem984 = iprot.readString(); + struct.success.add(_elem984); } } struct.setSuccessIsSet(true); @@ -119276,13 +119928,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list918 = iprot.readListBegin(); - struct.success = new ArrayList(_list918.size); - String _elem919; - for (int _i920 = 0; _i920 < _list918.size; ++_i920) + org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); + struct.success = new ArrayList(_list986.size); + String _elem987; + for (int _i988 = 0; _i988 < _list986.size; ++_i988) { - _elem919 = iprot.readString(); - struct.success.add(_elem919); + _elem987 = iprot.readString(); + struct.success.add(_elem987); } iprot.readListEnd(); } @@ -119317,9 +119969,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter921 : struct.success) + for (String _iter989 : struct.success) { - oprot.writeString(_iter921); + oprot.writeString(_iter989); } oprot.writeListEnd(); } @@ -119358,9 +120010,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter922 : struct.success) + for (String _iter990 : struct.success) { - oprot.writeString(_iter922); + oprot.writeString(_iter990); } } } @@ -119375,13 +120027,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list923 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list923.size); - String _elem924; - for (int _i925 = 0; _i925 < _list923.size; ++_i925) + org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list991.size); + String _elem992; + for (int _i993 = 0; _i993 < _list991.size; ++_i993) { - _elem924 = iprot.readString(); - struct.success.add(_elem924); + _elem992 = iprot.readString(); + struct.success.add(_elem992); } } struct.setSuccessIsSet(true); @@ -123436,13 +124088,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list926 = iprot.readListBegin(); - struct.success = new ArrayList(_list926.size); - String 
_elem927; - for (int _i928 = 0; _i928 < _list926.size; ++_i928) + org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); + struct.success = new ArrayList(_list994.size); + String _elem995; + for (int _i996 = 0; _i996 < _list994.size; ++_i996) { - _elem927 = iprot.readString(); - struct.success.add(_elem927); + _elem995 = iprot.readString(); + struct.success.add(_elem995); } iprot.readListEnd(); } @@ -123477,9 +124129,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter929 : struct.success) + for (String _iter997 : struct.success) { - oprot.writeString(_iter929); + oprot.writeString(_iter997); } oprot.writeListEnd(); } @@ -123518,9 +124170,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter930 : struct.success) + for (String _iter998 : struct.success) { - oprot.writeString(_iter930); + oprot.writeString(_iter998); } } } @@ -123535,13 +124187,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list931 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list931.size); - String _elem932; - for (int _i933 = 0; _i933 < _list931.size; ++_i933) + org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list999.size); + String _elem1000; + for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) { - _elem932 = iprot.readString(); - struct.success.add(_elem932); + _elem1000 = iprot.readString(); + struct.success.add(_elem1000); } } struct.setSuccessIsSet(true); @@ -126832,14 +127484,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list934 = iprot.readListBegin(); - struct.success = new ArrayList(_list934.size); - Role _elem935; - for (int _i936 = 0; _i936 < _list934.size; ++_i936) + org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); + struct.success = new ArrayList(_list1002.size); + Role _elem1003; + for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) { - _elem935 = new Role(); - _elem935.read(iprot); - struct.success.add(_elem935); + _elem1003 = new Role(); + _elem1003.read(iprot); + struct.success.add(_elem1003); } iprot.readListEnd(); } @@ -126874,9 +127526,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter937 : struct.success) + for (Role _iter1005 : struct.success) { - _iter937.write(oprot); + _iter1005.write(oprot); } oprot.writeListEnd(); } @@ -126915,9 +127567,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter938 : struct.success) + for (Role _iter1006 : struct.success) { - _iter938.write(oprot); + 
_iter1006.write(oprot); } } } @@ -126932,14 +127584,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list939.size); - Role _elem940; - for (int _i941 = 0; _i941 < _list939.size; ++_i941) + org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1007.size); + Role _elem1008; + for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) { - _elem940 = new Role(); - _elem940.read(iprot); - struct.success.add(_elem940); + _elem1008 = new Role(); + _elem1008.read(iprot); + struct.success.add(_elem1008); } } struct.setSuccessIsSet(true); @@ -129944,13 +130596,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list942.size); - String _elem943; - for (int _i944 = 0; _i944 < _list942.size; ++_i944) + org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1010.size); + String _elem1011; + for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) { - _elem943 = iprot.readString(); - struct.group_names.add(_elem943); + _elem1011 = iprot.readString(); + struct.group_names.add(_elem1011); } iprot.readListEnd(); } @@ -129986,9 +130638,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter945 : struct.group_names) + for (String _iter1013 : struct.group_names) { - oprot.writeString(_iter945); + oprot.writeString(_iter1013); } oprot.writeListEnd(); } @@ -130031,9 +130683,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter946 : struct.group_names) + for (String _iter1014 : struct.group_names) { - oprot.writeString(_iter946); + oprot.writeString(_iter1014); } } } @@ -130054,13 +130706,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list947.size); - String _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) + org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1015.size); + String _elem1016; + for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) { - _elem948 = iprot.readString(); - struct.group_names.add(_elem948); + _elem1016 = iprot.readString(); + struct.group_names.add(_elem1016); } } struct.setGroup_namesIsSet(true); @@ -131518,14 +132170,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - struct.success = new ArrayList(_list950.size); - HiveObjectPrivilege _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); + struct.success = new ArrayList(_list1018.size); + HiveObjectPrivilege _elem1019; + for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) { - _elem951 = new HiveObjectPrivilege(); - _elem951.read(iprot); - struct.success.add(_elem951); + _elem1019 = new HiveObjectPrivilege(); + _elem1019.read(iprot); + struct.success.add(_elem1019); } iprot.readListEnd(); } @@ -131560,9 +132212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter953 : struct.success) + for (HiveObjectPrivilege _iter1021 : struct.success) { - _iter953.write(oprot); + _iter1021.write(oprot); } oprot.writeListEnd(); } @@ -131601,9 +132253,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter954 : struct.success) + for (HiveObjectPrivilege _iter1022 : struct.success) { - _iter954.write(oprot); + _iter1022.write(oprot); } } } @@ -131618,14 +132270,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list955.size); - HiveObjectPrivilege _elem956; - for (int _i957 = 0; _i957 < _list955.size; ++_i957) + org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1023.size); + HiveObjectPrivilege _elem1024; + for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) { - _elem956 = new HiveObjectPrivilege(); - _elem956.read(iprot); - struct.success.add(_elem956); + _elem1024 = new HiveObjectPrivilege(); + _elem1024.read(iprot); + struct.success.add(_elem1024); } } struct.setSuccessIsSet(true); @@ -134527,13 +135179,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list958.size); - String _elem959; - for (int _i960 = 0; _i960 < _list958.size; ++_i960) + org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1026.size); + String _elem1027; + for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) { - _elem959 = iprot.readString(); - struct.group_names.add(_elem959); + _elem1027 = iprot.readString(); + struct.group_names.add(_elem1027); } iprot.readListEnd(); } @@ -134564,9 +135216,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter961 : struct.group_names) + for 
(String _iter1029 : struct.group_names) { - oprot.writeString(_iter961); + oprot.writeString(_iter1029); } oprot.writeListEnd(); } @@ -134603,9 +135255,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter962 : struct.group_names) + for (String _iter1030 : struct.group_names) { - oprot.writeString(_iter962); + oprot.writeString(_iter1030); } } } @@ -134621,13 +135273,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list963.size); - String _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1031.size); + String _elem1032; + for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) { - _elem964 = iprot.readString(); - struct.group_names.add(_elem964); + _elem1032 = iprot.readString(); + struct.group_names.add(_elem1032); } } struct.setGroup_namesIsSet(true); @@ -135030,13 +135682,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); - struct.success = new ArrayList(_list966.size); - String _elem967; - for (int _i968 = 0; _i968 < _list966.size; ++_i968) + org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); + struct.success = new ArrayList(_list1034.size); + String _elem1035; + for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) { - _elem967 = iprot.readString(); - struct.success.add(_elem967); + _elem1035 = iprot.readString(); + struct.success.add(_elem1035); } iprot.readListEnd(); } @@ -135071,9 +135723,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter969 : struct.success) + for (String _iter1037 : struct.success) { - oprot.writeString(_iter969); + oprot.writeString(_iter1037); } oprot.writeListEnd(); } @@ -135112,9 +135764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter970 : struct.success) + for (String _iter1038 : struct.success) { - oprot.writeString(_iter970); + oprot.writeString(_iter1038); } } } @@ -135129,13 +135781,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list971.size); - String _elem972; - for (int _i973 = 0; _i973 < _list971.size; ++_i973) + org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1039.size); + String _elem1040; + for (int _i1041 = 0; _i1041 < 
_list1039.size; ++_i1041) { - _elem972 = iprot.readString(); - struct.success.add(_elem972); + _elem1040 = iprot.readString(); + struct.success.add(_elem1040); } } struct.setSuccessIsSet(true); @@ -146386,16 +147038,3515 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CompactionRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CompactionRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class, metaDataMap); + } + + public compact_args() { + } + + public compact_args( + CompactionRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. + */ + public compact_args(compact_args other) { + if (other.isSetRqst()) { + this.rqst = new CompactionRequest(other.rqst); + } + } + + public compact_args deepCopy() { + return new compact_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public CompactionRequest getRqst() { + return this.rqst; + } + + public void setRqst(CompactionRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((CompactionRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof compact_args) + return this.equals((compact_args)that); + return false; + } + + public boolean equals(compact_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(compact_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if 
(lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("compact_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class compact_argsStandardSchemeFactory implements SchemeFactory { + public compact_argsStandardScheme getScheme() { + return new compact_argsStandardScheme(); + } + } + + private static class compact_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class compact_argsTupleSchemeFactory implements SchemeFactory { + public compact_argsTupleScheme getScheme() { + return new compact_argsTupleScheme(); + } + } + + private static class compact_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol 
prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap); + } + + public compact_result() { + } + + /** + * Performs a deep copy on other. 
+ */ + public compact_result(compact_result other) { + } + + public compact_result deepCopy() { + return new compact_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof compact_result) + return this.equals((compact_result)that); + return false; + } + + public boolean equals(compact_result that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(compact_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("compact_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class compact_resultStandardSchemeFactory implements SchemeFactory { + public compact_resultStandardScheme getScheme() { + return new compact_resultStandardScheme(); + } + } + + private static class compact_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result 
struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class compact_resultTupleSchemeFactory implements SchemeFactory { + public compact_resultTupleScheme getScheme() { + return new compact_resultTupleScheme(); + } + } + + private static class compact_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class show_compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory()); + } + + private ShowCompactRequest rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap); + } + + public show_compact_args() { + } + + public show_compact_args( + ShowCompactRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. + */ + public show_compact_args(show_compact_args other) { + if (other.isSetRqst()) { + this.rqst = new ShowCompactRequest(other.rqst); + } + } + + public show_compact_args deepCopy() { + return new show_compact_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public ShowCompactRequest getRqst() { + return this.rqst; + } + + public void setRqst(ShowCompactRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((ShowCompactRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_compact_args) + return this.equals((show_compact_args)that); + return false; + } + + public boolean equals(show_compact_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(show_compact_args other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_compact_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_compact_argsStandardSchemeFactory implements SchemeFactory { + public show_compact_argsStandardScheme getScheme() { + return new show_compact_argsStandardScheme(); + } + } + + private static class show_compact_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new ShowCompactRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
show_compact_argsTupleSchemeFactory implements SchemeFactory { + public show_compact_argsTupleScheme getScheme() { + return new show_compact_argsTupleScheme(); + } + } + + private static class show_compact_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new ShowCompactRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class show_compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory()); + } + + private ShowCompactResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap); + } + + public show_compact_result() { + } + + public show_compact_result( + ShowCompactResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public show_compact_result(show_compact_result other) { + if (other.isSetSuccess()) { + this.success = new ShowCompactResponse(other.success); + } + } + + public show_compact_result deepCopy() { + return new show_compact_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public ShowCompactResponse getSuccess() { + return this.success; + } + + public void setSuccess(ShowCompactResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((ShowCompactResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_compact_result) + return this.equals((show_compact_result)that); + return false; + } + + public boolean equals(show_compact_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if 
(present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(show_compact_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_compact_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_compact_resultStandardSchemeFactory implements SchemeFactory { + public show_compact_resultStandardScheme getScheme() { + return new show_compact_resultStandardScheme(); + } + } + + private static class show_compact_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new ShowCompactResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class show_compact_resultTupleSchemeFactory implements SchemeFactory { + public show_compact_resultTupleScheme getScheme() { + return new show_compact_resultTupleScheme(); + } + } + + private static class show_compact_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new ShowCompactResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class add_dynamic_partitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory()); + } + + private AddDynamicPartitions rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap); + } + + public add_dynamic_partitions_args() { + } + + public add_dynamic_partitions_args( + AddDynamicPartitions rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. + */ + public add_dynamic_partitions_args(add_dynamic_partitions_args other) { + if (other.isSetRqst()) { + this.rqst = new AddDynamicPartitions(other.rqst); + } + } + + public add_dynamic_partitions_args deepCopy() { + return new add_dynamic_partitions_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public AddDynamicPartitions getRqst() { + return this.rqst; + } + + public void setRqst(AddDynamicPartitions rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((AddDynamicPartitions)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_dynamic_partitions_args) + return this.equals((add_dynamic_partitions_args)that); + return false; + } + + public boolean equals(add_dynamic_partitions_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return 
list.hashCode(); + } + + @Override + public int compareTo(add_dynamic_partitions_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_dynamic_partitions_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsStandardScheme getScheme() { + return new add_dynamic_partitions_argsStandardScheme(); + } + } + + private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + 
struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsTupleScheme getScheme() { + return new add_dynamic_partitions_argsTupleScheme(); + } + } + + private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class add_dynamic_partitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_resultTupleSchemeFactory()); + } + + private NoSuchTxnException o1; // required + private TxnAbortedException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap); + } + + public add_dynamic_partitions_result() { + } + + public add_dynamic_partitions_result( + NoSuchTxnException o1, + TxnAbortedException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public add_dynamic_partitions_result(add_dynamic_partitions_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new TxnAbortedException(other.o2); + } + } + + public add_dynamic_partitions_result deepCopy() { + return new add_dynamic_partitions_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public NoSuchTxnException getO1() { + return this.o1; + } + + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public TxnAbortedException getO2() { + return this.o2; + } + + public void setO2(TxnAbortedException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchTxnException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((TxnAbortedException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return 
isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_dynamic_partitions_result) + return this.equals((add_dynamic_partitions_result)that); + return false; + } + + public boolean equals(add_dynamic_partitions_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(add_dynamic_partitions_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_resultStandardScheme getScheme() { + return new add_dynamic_partitions_resultStandardScheme(); + } + } + + private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_resultTupleScheme getScheme() { + return new add_dynamic_partitions_resultTupleScheme(); + } + } + + private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new NoSuchTxnException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new TxnAbortedException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class get_next_notification_args implements org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory()); + } + + private NotificationEventRequest rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap); + } + + public get_next_notification_args() { + } + + public get_next_notification_args( + NotificationEventRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_next_notification_args(get_next_notification_args other) { + if (other.isSetRqst()) { + this.rqst = new NotificationEventRequest(other.rqst); + } + } + + public get_next_notification_args deepCopy() { + return new get_next_notification_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public NotificationEventRequest getRqst() { + return this.rqst; + } + + public void setRqst(NotificationEventRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((NotificationEventRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_next_notification_args) + return this.equals((get_next_notification_args)that); + return false; + } + + public boolean equals(get_next_notification_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(get_next_notification_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_next_notification_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); 
+ return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory { + public get_next_notification_argsStandardScheme getScheme() { + return new get_next_notification_argsStandardScheme(); + } + } + + private static class get_next_notification_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new NotificationEventRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory { + public get_next_notification_argsTupleScheme getScheme() { + return new get_next_notification_argsTupleScheme(); + } + } + + private static class get_next_notification_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new NotificationEventRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class get_next_notification_result implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory()); + } + + private NotificationEventResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap); + } + + public get_next_notification_result() { + } + + public get_next_notification_result( + NotificationEventResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_next_notification_result(get_next_notification_result other) { + if (other.isSetSuccess()) { + this.success = new NotificationEventResponse(other.success); + } + } + + public get_next_notification_result deepCopy() { + return new get_next_notification_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public NotificationEventResponse getSuccess() { + return this.success; + } + + public void setSuccess(NotificationEventResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((NotificationEventResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_next_notification_result) + return this.equals((get_next_notification_result)that); + return false; + } + + public boolean equals(get_next_notification_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_next_notification_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_next_notification_result("); + boolean first 
= true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory { + public get_next_notification_resultStandardScheme getScheme() { + return new get_next_notification_resultStandardScheme(); + } + } + + private static class get_next_notification_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new NotificationEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory { + public get_next_notification_resultTupleScheme getScheme() { + return new get_next_notification_resultTupleScheme(); + } + } + + private static class get_next_notification_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if 
(incoming.get(0)) { + struct.success = new NotificationEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class get_current_notificationEventId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap); + } + + public get_current_notificationEventId_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_current_notificationEventId_args(get_current_notificationEventId_args other) { + } + + public get_current_notificationEventId_args deepCopy() { + return new get_current_notificationEventId_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_current_notificationEventId_args) + return this.equals((get_current_notificationEventId_args)that); + return false; + } + + public boolean equals(get_current_notificationEventId_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_current_notificationEventId_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_current_notificationEventId_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_argsStandardScheme getScheme() { + return new get_current_notificationEventId_argsStandardScheme(); + } + } + + private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == 
org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_argsTupleScheme getScheme() { + return new get_current_notificationEventId_argsTupleScheme(); + } + } + + private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class get_current_notificationEventId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory()); + } + + private CurrentNotificationEventId success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap); + } + + public get_current_notificationEventId_result() { + } + + public get_current_notificationEventId_result( + CurrentNotificationEventId success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public get_current_notificationEventId_result(get_current_notificationEventId_result other) { + if (other.isSetSuccess()) { + this.success = new CurrentNotificationEventId(other.success); + } + } + + public get_current_notificationEventId_result deepCopy() { + return new get_current_notificationEventId_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public CurrentNotificationEventId getSuccess() { + return this.success; + } + + public void setSuccess(CurrentNotificationEventId success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((CurrentNotificationEventId)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_current_notificationEventId_result) + return this.equals((get_current_notificationEventId_result)that); + return false; + } + + public boolean equals(get_current_notificationEventId_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if 
(!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(get_current_notificationEventId_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_current_notificationEventId_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_resultStandardScheme getScheme() { + return new get_current_notificationEventId_resultStandardScheme(); + } + } + + private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new CurrentNotificationEventId(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_resultTupleScheme getScheme() { + return new get_current_notificationEventId_resultTupleScheme(); + } + } + + private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new CurrentNotificationEventId(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class fire_listener_event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory()); + } + + private FireEventRequest rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap); } - public compact_args() { + public fire_listener_event_args() { } - public compact_args( - CompactionRequest rqst) + public fire_listener_event_args( + FireEventRequest rqst) { this(); this.rqst = rqst; @@ -146404,14 +150555,14 @@ public compact_args( /** * Performs a deep copy on other. */ - public compact_args(compact_args other) { + public fire_listener_event_args(fire_listener_event_args other) { if (other.isSetRqst()) { - this.rqst = new CompactionRequest(other.rqst); + this.rqst = new FireEventRequest(other.rqst); } } - public compact_args deepCopy() { - return new compact_args(this); + public fire_listener_event_args deepCopy() { + return new fire_listener_event_args(this); } @Override @@ -146419,11 +150570,11 @@ public void clear() { this.rqst = null; } - public CompactionRequest getRqst() { + public FireEventRequest getRqst() { return this.rqst; } - public void setRqst(CompactionRequest rqst) { + public void setRqst(FireEventRequest rqst) { this.rqst = rqst; } @@ -146448,7 +150599,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((CompactionRequest)value); + setRqst((FireEventRequest)value); } break; @@ -146481,12 +150632,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof compact_args) - return this.equals((compact_args)that); + if (that instanceof fire_listener_event_args) + return this.equals((fire_listener_event_args)that); return false; } - public boolean equals(compact_args that) { + public boolean equals(fire_listener_event_args that) { if (that == null) return false; @@ -146515,7 +150666,7 @@ public int hashCode() { } @Override - public int compareTo(compact_args other) { + public int compareTo(fire_listener_event_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -146549,7 +150700,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("compact_args("); + StringBuilder sb = new StringBuilder("fire_listener_event_args("); boolean first = true; sb.append("rqst:"); @@ -146587,15 +150738,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class compact_argsStandardSchemeFactory implements SchemeFactory { - public compact_argsStandardScheme getScheme() { - return new compact_argsStandardScheme(); + private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory { + public fire_listener_event_argsStandardScheme getScheme() { + return new fire_listener_event_argsStandardScheme(); } } - private static class compact_argsStandardScheme extends StandardScheme { + private static class fire_listener_event_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -146607,7 +150758,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new CompactionRequest(); + struct.rqst = new FireEventRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -146623,7 +150774,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -146638,16 +150789,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struc } - private static class compact_argsTupleSchemeFactory implements SchemeFactory { - public compact_argsTupleScheme getScheme() { - return new compact_argsTupleScheme(); + private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory { + public fire_listener_event_argsTupleScheme getScheme() { + return new fire_listener_event_argsTupleScheme(); } } - private static class compact_argsTupleScheme extends TupleScheme { + private static class fire_listener_event_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -146660,11 +150811,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new CompactionRequest(); + 
struct.rqst = new FireEventRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -146673,20 +150824,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) } - public static class compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result"); + public static class fire_listener_event_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fire_listener_event_resultTupleSchemeFactory()); } + private FireEventResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -146701,6 +150854,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -146739,37 +150894,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap); } - public compact_result() { + public fire_listener_event_result() { + } + + public fire_listener_event_result( + FireEventResponse success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. 
*/ - public compact_result(compact_result other) { + public fire_listener_event_result(fire_listener_event_result other) { + if (other.isSetSuccess()) { + this.success = new FireEventResponse(other.success); + } } - public compact_result deepCopy() { - return new compact_result(this); + public fire_listener_event_result deepCopy() { + return new fire_listener_event_result(this); } @Override public void clear() { + this.success = null; + } + + public FireEventResponse getSuccess() { + return this.success; + } + + public void setSuccess(FireEventResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((FireEventResponse)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + } throw new IllegalStateException(); } @@ -146781,6 +150985,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -146789,15 +150995,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof compact_result) - return this.equals((compact_result)that); + if (that instanceof fire_listener_event_result) + return this.equals((fire_listener_event_result)that); return false; } - public boolean equals(compact_result that) { + public boolean equals(fire_listener_event_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -146805,17 +151020,32 @@ public boolean equals(compact_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + return list.hashCode(); } @Override - public int compareTo(compact_result other) { + public int compareTo(fire_listener_event_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -146833,9 +151063,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("compact_result("); + StringBuilder sb = new StringBuilder("fire_listener_event_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -146843,6 +151080,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -146861,15 +151101,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class compact_resultStandardSchemeFactory implements SchemeFactory { - public compact_resultStandardScheme getScheme() { - return new compact_resultStandardScheme(); + private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { + public fire_listener_event_resultStandardScheme getScheme() { + return new fire_listener_event_resultStandardScheme(); } } - private static class compact_resultStandardScheme extends StandardScheme { + private static class fire_listener_event_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -146879,6 +151119,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new FireEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -146888,53 +151137,70 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class compact_resultTupleSchemeFactory implements SchemeFactory { - public compact_resultTupleScheme getScheme() { - return new compact_resultTupleScheme(); + private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { + public fire_listener_event_resultTupleScheme getScheme() { + return new fire_listener_event_resultTupleScheme(); } } - private static class compact_resultTupleScheme extends TupleScheme { + private static class fire_listener_event_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + public 
void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new FireEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } } } } - public static class show_compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args"); + public static class flushCache_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory()); } - private ShowCompactRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); +; private static final Map byName = new HashMap(); @@ -146949,8 +151215,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struc */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; default: return null; } @@ -146989,86 +151253,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap); } - public show_compact_args() { - } - - public show_compact_args( - ShowCompactRequest rqst) - { - this(); - this.rqst = rqst; + public flushCache_args() { } /** * Performs a deep copy on other. 
*/ - public show_compact_args(show_compact_args other) { - if (other.isSetRqst()) { - this.rqst = new ShowCompactRequest(other.rqst); - } + public flushCache_args(flushCache_args other) { } - public show_compact_args deepCopy() { - return new show_compact_args(this); + public flushCache_args deepCopy() { + return new flushCache_args(this); } @Override public void clear() { - this.rqst = null; - } - - public ShowCompactRequest getRqst() { - return this.rqst; - } - - public void setRqst(ShowCompactRequest rqst) { - this.rqst = rqst; - } - - public void unsetRqst() { - this.rqst = null; - } - - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; - } - - public void setRqstIsSet(boolean value) { - if (!value) { - this.rqst = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: - if (value == null) { - unsetRqst(); - } else { - setRqst((ShowCompactRequest)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); - } throw new IllegalStateException(); } @@ -147080,8 +151295,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); } throw new IllegalStateException(); } @@ -147090,24 +151303,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_compact_args) - return this.equals((show_compact_args)that); + if (that instanceof flushCache_args) + return this.equals((flushCache_args)that); return false; } - public boolean equals(show_compact_args that) { + public boolean equals(flushCache_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) - return false; - if (!this.rqst.equals(that.rqst)) - return false; - } - return true; } @@ -147115,32 +151319,17 @@ public boolean equals(show_compact_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); - return list.hashCode(); } @Override - public int compareTo(show_compact_args other) { + public int compareTo(flushCache_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -147158,16 +151347,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_compact_args("); + StringBuilder sb = new StringBuilder("flushCache_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { - sb.append("null"); - } else { - sb.append(this.rqst); - } - first = false; sb.append(")"); return sb.toString(); } @@ -147175,9 +151357,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -147196,15 +151375,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_compact_argsStandardSchemeFactory implements SchemeFactory { - public show_compact_argsStandardScheme getScheme() { - return new show_compact_argsStandardScheme(); + private static class flushCache_argsStandardSchemeFactory implements SchemeFactory { + public flushCache_argsStandardScheme getScheme() { + return new flushCache_argsStandardScheme(); } } - private static class show_compact_argsStandardScheme extends StandardScheme { + private static class flushCache_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -147214,15 +151393,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s break; } switch (schemeField.id) { - case 1: // RQST - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new ShowCompactRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -147232,72 +151402,51 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class show_compact_argsTupleSchemeFactory implements SchemeFactory { - public show_compact_argsTupleScheme getScheme() { - return new show_compact_argsTupleScheme(); + private static class flushCache_argsTupleSchemeFactory implements SchemeFactory { + public flushCache_argsTupleScheme getScheme() { + return new flushCache_argsTupleScheme(); } } - private static class show_compact_argsTupleScheme extends TupleScheme { + private static class flushCache_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { 
TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.rqst = new ShowCompactRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } } } } - public static class show_compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result"); + public static class flushCache_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory()); } - private ShowCompactResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); +; private static final Map byName = new HashMap(); @@ -147312,8 +151461,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args st */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; default: return null; } @@ -147352,86 +151499,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); } - public show_compact_result() { - } - - public show_compact_result( - ShowCompactResponse success) - { - this(); - this.success = success; + public flushCache_result() { } /** * Performs a deep copy on other. 
*/ - public show_compact_result(show_compact_result other) { - if (other.isSetSuccess()) { - this.success = new ShowCompactResponse(other.success); - } + public flushCache_result(flushCache_result other) { } - public show_compact_result deepCopy() { - return new show_compact_result(this); + public flushCache_result deepCopy() { + return new flushCache_result(this); } @Override public void clear() { - this.success = null; - } - - public ShowCompactResponse getSuccess() { - return this.success; - } - - public void setSuccess(ShowCompactResponse success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((ShowCompactResponse)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); - } throw new IllegalStateException(); } @@ -147443,8 +151541,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); } throw new IllegalStateException(); } @@ -147453,24 +151549,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_compact_result) - return this.equals((show_compact_result)that); + if (that instanceof flushCache_result) + return this.equals((flushCache_result)that); return false; } - public boolean equals(show_compact_result that) { + public boolean equals(flushCache_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - return true; } @@ -147478,32 +151565,17 @@ public boolean equals(show_compact_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - return list.hashCode(); } @Override - public int compareTo(show_compact_result other) { + public int compareTo(flushCache_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -147521,16 +151593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_compact_result("); + StringBuilder sb = new StringBuilder("flushCache_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; sb.append(")"); return sb.toString(); } @@ -147538,9 +151603,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -147559,15 +151621,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_compact_resultStandardSchemeFactory implements SchemeFactory { - public show_compact_resultStandardScheme getScheme() { - return new show_compact_resultStandardScheme(); + private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { + public flushCache_resultStandardScheme getScheme() { + return new flushCache_resultStandardScheme(); } } - private static class show_compact_resultStandardScheme extends StandardScheme { + private static class flushCache_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -147577,15 +151639,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ShowCompactResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -147595,72 +151648,53 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class show_compact_resultTupleSchemeFactory implements SchemeFactory { - public show_compact_resultTupleScheme getScheme() { - return new show_compact_resultTupleScheme(); + private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { + public flushCache_resultTupleScheme getScheme() { + return new flushCache_resultTupleScheme(); } } - private static class show_compact_resultTupleScheme extends TupleScheme { + private static class flushCache_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new ShowCompactResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } } } } - public static class add_dynamic_partitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args"); + public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); } - private AddDynamicPartitions rqst; // required + private GetFileMetadataByExprRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -147675,8 +151709,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -147720,70 +151754,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); } - public add_dynamic_partitions_args() { + public get_file_metadata_by_expr_args() { } - public add_dynamic_partitions_args( - AddDynamicPartitions rqst) + public get_file_metadata_by_expr_args( + GetFileMetadataByExprRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. 
*/ - public add_dynamic_partitions_args(add_dynamic_partitions_args other) { - if (other.isSetRqst()) { - this.rqst = new AddDynamicPartitions(other.rqst); + public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataByExprRequest(other.req); } } - public add_dynamic_partitions_args deepCopy() { - return new add_dynamic_partitions_args(this); + public get_file_metadata_by_expr_args deepCopy() { + return new get_file_metadata_by_expr_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public AddDynamicPartitions getRqst() { - return this.rqst; + public GetFileMetadataByExprRequest getReq() { + return this.req; } - public void setRqst(AddDynamicPartitions rqst) { - this.rqst = rqst; + public void setReq(GetFileMetadataByExprRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((AddDynamicPartitions)value); + setReq((GetFileMetadataByExprRequest)value); } break; @@ -147792,8 +151826,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -147806,8 +151840,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -147816,21 +151850,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_dynamic_partitions_args) - return this.equals((add_dynamic_partitions_args)that); + if (that instanceof get_file_metadata_by_expr_args) + return this.equals((get_file_metadata_by_expr_args)that); return false; } - public boolean equals(add_dynamic_partitions_args that) { + public boolean equals(get_file_metadata_by_expr_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -147841,28 +151875,28 @@ public boolean equals(add_dynamic_partitions_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int 
compareTo(add_dynamic_partitions_args other) { + public int compareTo(get_file_metadata_by_expr_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -147884,14 +151918,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("add_dynamic_partitions_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.req); } first = false; sb.append(")"); @@ -147901,8 +151935,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (req != null) { + req.validate(); } } @@ -147922,15 +151956,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_argsStandardScheme getScheme() { - return new add_dynamic_partitions_argsStandardScheme(); + private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsStandardScheme getScheme() { + return new get_file_metadata_by_expr_argsStandardScheme(); } } - private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -147940,11 +151974,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AddDynamicPartitions(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -147958,13 +151992,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { 
struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -147973,60 +152007,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partit } - private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_argsTupleScheme getScheme() { - return new add_dynamic_partitions_argsTupleScheme(); + private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsTupleScheme getScheme() { + return new get_file_metadata_by_expr_argsTupleScheme(); } } - private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new AddDynamicPartitions(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - public static class add_dynamic_partitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result"); + public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
add_dynamic_partitions_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); } - private NoSuchTxnException o1; // required - private TxnAbortedException o2; // required + private GetFileMetadataByExprResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"), - O2((short)2, "o2"); + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -148041,10 +152072,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitio */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // O1 - return O1; - case 2: // O2 - return O2; + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -148088,109 +152117,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); } - public add_dynamic_partitions_result() { + public get_file_metadata_by_expr_result() { } - public add_dynamic_partitions_result( - NoSuchTxnException o1, - TxnAbortedException o2) + public get_file_metadata_by_expr_result( + GetFileMetadataByExprResult success) { this(); - this.o1 = o1; - this.o2 = o2; + this.success = success; } /** * Performs a deep copy on other. 
*/ - public add_dynamic_partitions_result(add_dynamic_partitions_result other) { - if (other.isSetO1()) { - this.o1 = new NoSuchTxnException(other.o1); - } - if (other.isSetO2()) { - this.o2 = new TxnAbortedException(other.o2); + public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { + if (other.isSetSuccess()) { + this.success = new GetFileMetadataByExprResult(other.success); } } - public add_dynamic_partitions_result deepCopy() { - return new add_dynamic_partitions_result(this); + public get_file_metadata_by_expr_result deepCopy() { + return new get_file_metadata_by_expr_result(this); } @Override public void clear() { - this.o1 = null; - this.o2 = null; - } - - public NoSuchTxnException getO1() { - return this.o1; - } - - public void setO1(NoSuchTxnException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ - public boolean isSetO1() { - return this.o1 != null; - } - - public void setO1IsSet(boolean value) { - if (!value) { - this.o1 = null; - } + this.success = null; } - public TxnAbortedException getO2() { - return this.o2; + public GetFileMetadataByExprResult getSuccess() { + return this.success; } - public void setO2(TxnAbortedException o2) { - this.o2 = o2; + public void setSuccess(GetFileMetadataByExprResult success) { + this.success = success; } - public void unsetO2() { - this.o2 = null; + public void unsetSuccess() { + this.success = null; } - /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ - public boolean isSetO2() { - return this.o2 != null; + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; } - public void setO2IsSet(boolean value) { + public void setSuccessIsSet(boolean value) { if (!value) { - this.o2 = null; + this.success = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((NoSuchTxnException)value); - } - break; - - case O2: + case SUCCESS: if (value == null) { - unsetO2(); + unsetSuccess(); } else { - setO2((TxnAbortedException)value); + setSuccess((GetFileMetadataByExprResult)value); } break; @@ -148199,11 +152189,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case O1: - return getO1(); - - case O2: - return getO2(); + case SUCCESS: + return getSuccess(); } throw new IllegalStateException(); @@ -148216,10 +152203,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case O1: - return isSetO1(); - case O2: - return isSetO2(); + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -148228,30 +152213,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_dynamic_partitions_result) - return this.equals((add_dynamic_partitions_result)that); + if (that instanceof get_file_metadata_by_expr_result) + return this.equals((get_file_metadata_by_expr_result)that); return false; } - public boolean equals(add_dynamic_partitions_result that) { + public boolean equals(get_file_metadata_by_expr_result that) { if (that == null) return false; - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if 
(!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) return false; - if (!this.o2.equals(that.o2)) + if (!this.success.equals(that.success)) return false; } @@ -148262,43 +152238,28 @@ public boolean equals(add_dynamic_partitions_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); return list.hashCode(); } @Override - public int compareTo(add_dynamic_partitions_result other) { + public int compareTo(get_file_metadata_by_expr_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); if (lastComparison != 0) { return lastComparison; } - if (isSetO2()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); if (lastComparison != 0) { return lastComparison; } @@ -148320,22 +152281,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); boolean first = true; - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { + sb.append("success:"); + if (this.success == null) { sb.append("null"); } else { - sb.append(this.o2); + sb.append(this.success); } first = false; sb.append(")"); @@ -148345,6 +152298,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -148363,15 +152319,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultStandardScheme getScheme() { - return new add_dynamic_partitions_resultStandardScheme(); + private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultStandardScheme getScheme() { + return new get_file_metadata_by_expr_resultStandardScheme(); } } - private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -148381,20 +152337,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti break; } switch (schemeField.id) { - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 + case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); + struct.success = new GetFileMetadataByExprResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -148408,18 +152355,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); + if (struct.success != null) { + 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -148428,68 +152370,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partit } - private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultTupleScheme getScheme() { - return new add_dynamic_partitions_resultTupleScheme(); + private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultTupleScheme getScheme() { + return new get_file_metadata_by_expr_resultTupleScheme(); } } - private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetO1()) { - struct.o1.write(oprot); - } - if (struct.isSetO2()) { - struct.o2.write(oprot); + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(1)) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); + struct.success = new GetFileMetadataByExprResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); } } } } - public static class get_next_notification_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args"); + public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory()); + 
schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); } - private NotificationEventRequest rqst; // required + private GetFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -148504,8 +152435,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitio */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -148549,70 +152480,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); } - public get_next_notification_args() { + public get_file_metadata_args() { } - public get_next_notification_args( - NotificationEventRequest rqst) + public get_file_metadata_args( + GetFileMetadataRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. 
*/ - public get_next_notification_args(get_next_notification_args other) { - if (other.isSetRqst()) { - this.rqst = new NotificationEventRequest(other.rqst); + public get_file_metadata_args(get_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataRequest(other.req); } } - public get_next_notification_args deepCopy() { - return new get_next_notification_args(this); + public get_file_metadata_args deepCopy() { + return new get_file_metadata_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public NotificationEventRequest getRqst() { - return this.rqst; + public GetFileMetadataRequest getReq() { + return this.req; } - public void setRqst(NotificationEventRequest rqst) { - this.rqst = rqst; + public void setReq(GetFileMetadataRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((NotificationEventRequest)value); + setReq((GetFileMetadataRequest)value); } break; @@ -148621,8 +152552,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -148635,8 +152566,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -148645,21 +152576,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_args) - return this.equals((get_next_notification_args)that); + if (that instanceof get_file_metadata_args) + return this.equals((get_file_metadata_args)that); return false; } - public boolean equals(get_next_notification_args that) { + public boolean equals(get_file_metadata_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -148670,28 +152601,28 @@ public boolean equals(get_next_notification_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(get_next_notification_args other) { + public int 
compareTo(get_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -148713,14 +152644,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.req); } first = false; sb.append(")"); @@ -148730,8 +152661,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (req != null) { + req.validate(); } } @@ -148751,15 +152682,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory { - public get_next_notification_argsStandardScheme getScheme() { - return new get_next_notification_argsStandardScheme(); + private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_argsStandardScheme getScheme() { + return new get_file_metadata_argsStandardScheme(); } } - private static class get_next_notification_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -148769,11 +152700,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -148787,13 +152718,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - 
struct.rqst.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -148802,53 +152733,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notificat } - private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory { - public get_next_notification_argsTupleScheme getScheme() { - return new get_next_notification_argsTupleScheme(); + private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_argsTupleScheme getScheme() { + return new get_file_metadata_argsTupleScheme(); } } - private static class get_next_notification_argsTupleScheme extends TupleScheme { + private static class get_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - public static class get_next_notification_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result"); + public static class get_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory()); } - private NotificationEventResponse success; // required + private GetFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -148913,16 +152844,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); } - public get_next_notification_result() { + public get_file_metadata_result() { } - public get_next_notification_result( - NotificationEventResponse success) + public get_file_metadata_result( + GetFileMetadataResult success) { this(); this.success = success; @@ -148931,14 +152862,14 @@ public get_next_notification_result( /** * Performs a deep copy on other. */ - public get_next_notification_result(get_next_notification_result other) { + public get_file_metadata_result(get_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new NotificationEventResponse(other.success); + this.success = new GetFileMetadataResult(other.success); } } - public get_next_notification_result deepCopy() { - return new get_next_notification_result(this); + public get_file_metadata_result deepCopy() { + return new get_file_metadata_result(this); } @Override @@ -148946,11 +152877,11 @@ public void clear() { this.success = null; } - public NotificationEventResponse getSuccess() { + public GetFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(NotificationEventResponse success) { + public void setSuccess(GetFileMetadataResult success) { this.success = success; } @@ -148975,7 +152906,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((NotificationEventResponse)value); + setSuccess((GetFileMetadataResult)value); } break; @@ -149008,12 +152939,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_result) - return this.equals((get_next_notification_result)that); + if (that instanceof get_file_metadata_result) + return this.equals((get_file_metadata_result)that); return false; } - public boolean equals(get_next_notification_result that) { + public boolean equals(get_file_metadata_result that) { if (that == null) return false; @@ -149042,7 +152973,7 @@ public int hashCode() { } @Override - public int compareTo(get_next_notification_result other) { + public int compareTo(get_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -149076,7 +153007,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -149114,15 +153045,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory { - public get_next_notification_resultStandardScheme getScheme() { - return new get_next_notification_resultStandardScheme(); + private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_resultStandardScheme getScheme() { + return new get_file_metadata_resultStandardScheme(); } } - private static class get_next_notification_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -149134,7 +153065,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new NotificationEventResponse(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -149150,7 +153081,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -149165,16 +153096,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notificat } - private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory { - public get_next_notification_resultTupleScheme getScheme() { - return new get_next_notification_resultTupleScheme(); + private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_resultTupleScheme getScheme() { + return new get_file_metadata_resultTupleScheme(); } } - private static class get_next_notification_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -149187,11 +153118,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notificati } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new NotificationEventResponse(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -149200,20 +153131,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notificatio } - public static class get_current_notificationEventId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args"); + public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); } + private PutFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -149228,6 +153161,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notificatio */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // REQ + return REQ; default: return null; } @@ -149266,37 +153201,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); } - public get_current_notificationEventId_args() { + public put_file_metadata_args() { + } + + public put_file_metadata_args( + PutFileMetadataRequest req) + { + this(); + this.req = req; } /** * Performs a deep copy on other. 
*/ - public get_current_notificationEventId_args(get_current_notificationEventId_args other) { + public put_file_metadata_args(put_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new PutFileMetadataRequest(other.req); + } } - public get_current_notificationEventId_args deepCopy() { - return new get_current_notificationEventId_args(this); + public put_file_metadata_args deepCopy() { + return new put_file_metadata_args(this); } @Override public void clear() { + this.req = null; + } + + public PutFileMetadataRequest getReq() { + return this.req; + } + + public void setReq(PutFileMetadataRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((PutFileMetadataRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case REQ: + return getReq(); + } throw new IllegalStateException(); } @@ -149308,6 +153292,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -149316,15 +153302,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_args) - return this.equals((get_current_notificationEventId_args)that); + if (that instanceof put_file_metadata_args) + return this.equals((put_file_metadata_args)that); return false; } - public boolean equals(get_current_notificationEventId_args that) { + public boolean equals(put_file_metadata_args that) { if (that == null) return false; + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + return true; } @@ -149332,17 +153327,32 @@ public boolean equals(get_current_notificationEventId_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + return list.hashCode(); } @Override - public int compareTo(get_current_notificationEventId_args other) { + public int compareTo(put_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -149360,9 +153370,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_args("); + StringBuilder sb = new StringBuilder("put_file_metadata_args("); boolean first = true; + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; sb.append(")"); return sb.toString(); } @@ -149370,6 +153387,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -149388,15 +153408,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsStandardScheme getScheme() { - return new get_current_notificationEventId_argsStandardScheme(); + private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_argsStandardScheme getScheme() { + return new put_file_metadata_argsStandardScheme(); } } - private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme { + private static class put_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -149406,6 +153426,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific break; } switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -149415,49 +153444,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsTupleScheme getScheme() { - return new get_current_notificationEventId_argsTupleScheme(); + private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_argsTupleScheme getScheme() { + return new put_file_metadata_argsTupleScheme(); } } - private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme { + private static class put_file_metadata_argsTupleScheme extends 
TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } } } } - public static class get_current_notificationEventId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result"); + public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); } - private CurrentNotificationEventId success; // required + private PutFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -149522,16 +153570,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); } - public get_current_notificationEventId_result() { + public put_file_metadata_result() { } - public get_current_notificationEventId_result( - CurrentNotificationEventId success) + public put_file_metadata_result( + PutFileMetadataResult success) { this(); this.success = success; @@ -149540,14 +153588,14 @@ public get_current_notificationEventId_result( /** * Performs a deep copy on other. */ - public get_current_notificationEventId_result(get_current_notificationEventId_result other) { + public put_file_metadata_result(put_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new CurrentNotificationEventId(other.success); + this.success = new PutFileMetadataResult(other.success); } } - public get_current_notificationEventId_result deepCopy() { - return new get_current_notificationEventId_result(this); + public put_file_metadata_result deepCopy() { + return new put_file_metadata_result(this); } @Override @@ -149555,11 +153603,11 @@ public void clear() { this.success = null; } - public CurrentNotificationEventId getSuccess() { + public PutFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(CurrentNotificationEventId success) { + public void setSuccess(PutFileMetadataResult success) { this.success = success; } @@ -149584,7 +153632,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((CurrentNotificationEventId)value); + setSuccess((PutFileMetadataResult)value); } break; @@ -149617,12 +153665,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_result) - return this.equals((get_current_notificationEventId_result)that); + if (that instanceof put_file_metadata_result) + return this.equals((put_file_metadata_result)that); return false; } - public boolean equals(get_current_notificationEventId_result that) { + public boolean equals(put_file_metadata_result that) { if (that == null) return false; @@ -149651,7 +153699,7 @@ public int hashCode() { } @Override - public int compareTo(get_current_notificationEventId_result other) { + public int compareTo(put_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -149685,7 +153733,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_result("); + StringBuilder sb = new StringBuilder("put_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -149723,15 +153771,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultStandardScheme getScheme() { - return new get_current_notificationEventId_resultStandardScheme(); + private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_resultStandardScheme getScheme() { + return new put_file_metadata_resultStandardScheme(); } } - private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme { + private static class put_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -149743,7 +153791,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new CurrentNotificationEventId(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -149759,7 +153807,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -149774,16 +153822,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notifi } - private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultTupleScheme getScheme() { - return new get_current_notificationEventId_resultTupleScheme(); + private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_resultTupleScheme getScheme() { + return new put_file_metadata_resultTupleScheme(); } } - private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme { + private static class put_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -149796,11 +153844,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notific } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, 
get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new CurrentNotificationEventId(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -149809,22 +153857,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notifica } - public static class fire_listener_event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args"); + public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); } - private FireEventRequest rqst; // required + private ClearFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -149839,8 +153887,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notifica */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -149884,70 +153932,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); } - public fire_listener_event_args() { + public clear_file_metadata_args() { } - public fire_listener_event_args( - FireEventRequest rqst) + public clear_file_metadata_args( + ClearFileMetadataRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. 
*/ - public fire_listener_event_args(fire_listener_event_args other) { - if (other.isSetRqst()) { - this.rqst = new FireEventRequest(other.rqst); + public clear_file_metadata_args(clear_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new ClearFileMetadataRequest(other.req); } } - public fire_listener_event_args deepCopy() { - return new fire_listener_event_args(this); + public clear_file_metadata_args deepCopy() { + return new clear_file_metadata_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public FireEventRequest getRqst() { - return this.rqst; + public ClearFileMetadataRequest getReq() { + return this.req; } - public void setRqst(FireEventRequest rqst) { - this.rqst = rqst; + public void setReq(ClearFileMetadataRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((FireEventRequest)value); + setReq((ClearFileMetadataRequest)value); } break; @@ -149956,8 +154004,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -149970,8 +154018,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -149980,21 +154028,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_args) - return this.equals((fire_listener_event_args)that); + if (that instanceof clear_file_metadata_args) + return this.equals((clear_file_metadata_args)that); return false; } - public boolean equals(fire_listener_event_args that) { + public boolean equals(clear_file_metadata_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -150005,28 +154053,28 @@ public boolean equals(fire_listener_event_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(fire_listener_event_args other) { + public int compareTo(clear_file_metadata_args 
other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -150048,14 +154096,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("fire_listener_event_args("); + StringBuilder sb = new StringBuilder("clear_file_metadata_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.req); } first = false; sb.append(")"); @@ -150065,8 +154113,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (req != null) { + req.validate(); } } @@ -150086,15 +154134,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory { - public fire_listener_event_argsStandardScheme getScheme() { - return new fire_listener_event_argsStandardScheme(); + private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsStandardScheme getScheme() { + return new clear_file_metadata_argsStandardScheme(); } } - private static class fire_listener_event_argsStandardScheme extends StandardScheme { + private static class clear_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -150104,11 +154152,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new FireEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new ClearFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -150122,13 +154170,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); + if (struct.req != 
null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -150137,53 +154185,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even } - private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory { - public fire_listener_event_argsTupleScheme getScheme() { - return new fire_listener_event_argsTupleScheme(); + private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsTupleScheme getScheme() { + return new clear_file_metadata_argsTupleScheme(); } } - private static class fire_listener_event_argsTupleScheme extends TupleScheme { + private static class clear_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new FireEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new ClearFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - public static class fire_listener_event_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result"); + public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new fire_listener_event_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); } - private FireEventResponse success; // required + private ClearFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -150248,16 +154296,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); } - public fire_listener_event_result() { + public clear_file_metadata_result() { } - public fire_listener_event_result( - FireEventResponse success) + public clear_file_metadata_result( + ClearFileMetadataResult success) { this(); this.success = success; @@ -150266,14 +154314,14 @@ public fire_listener_event_result( /** * Performs a deep copy on other. */ - public fire_listener_event_result(fire_listener_event_result other) { + public clear_file_metadata_result(clear_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new FireEventResponse(other.success); + this.success = new ClearFileMetadataResult(other.success); } } - public fire_listener_event_result deepCopy() { - return new fire_listener_event_result(this); + public clear_file_metadata_result deepCopy() { + return new clear_file_metadata_result(this); } @Override @@ -150281,11 +154329,11 @@ public void clear() { this.success = null; } - public FireEventResponse getSuccess() { + public ClearFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(FireEventResponse success) { + public void setSuccess(ClearFileMetadataResult success) { this.success = success; } @@ -150310,7 +154358,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((FireEventResponse)value); + setSuccess((ClearFileMetadataResult)value); } break; @@ -150343,12 +154391,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_result) - return this.equals((fire_listener_event_result)that); + if (that instanceof clear_file_metadata_result) + return this.equals((clear_file_metadata_result)that); return false; } - public boolean equals(fire_listener_event_result that) { + public boolean equals(clear_file_metadata_result that) { if (that == null) return false; @@ -150377,7 +154425,7 @@ public int hashCode() { } @Override - public int compareTo(fire_listener_event_result other) { + public int compareTo(clear_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -150411,7 +154459,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("fire_listener_event_result("); + StringBuilder sb = new StringBuilder("clear_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -150449,15 +154497,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { - public fire_listener_event_resultStandardScheme getScheme() { - return new fire_listener_event_resultStandardScheme(); + private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultStandardScheme getScheme() { + return new clear_file_metadata_resultStandardScheme(); } } - private static class fire_listener_event_resultStandardScheme extends StandardScheme { + private static class clear_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -150469,7 +154517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new FireEventResponse(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -150485,7 +154533,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -150500,16 +154548,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even } - private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { - public fire_listener_event_resultTupleScheme getScheme() { - return new fire_listener_event_resultTupleScheme(); + private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultTupleScheme getScheme() { + return new clear_file_metadata_resultTupleScheme(); } } - private static class fire_listener_event_resultTupleScheme extends TupleScheme { + private static class clear_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -150522,11 +154570,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol 
prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new FireEventResponse(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java index a877338..ecff000 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TxnAbortedException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnAbortedException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java index 8b255b9..0828397 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TxnInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnInfo"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java index 05af505..50da426 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TxnOpenException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnOpenException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java index 61e7ceb..309abe4 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Type implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Type"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java index e05e79d..cdb1671 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class UnknownDBException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownDBException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java index c626bf6..c767367 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class UnknownPartitionException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownPartitionException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java index 2856121..1d0f347 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class UnknownTableException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownTableException"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java index cf248e0..568a744 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class UnlockRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnlockRequest"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java index cc8d5f5..8d0daa5 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Version implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Version"); diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index ae47cb5..8770e85 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1007,6 +1007,29 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @return \metastore\FireEventResponse */ public function fire_listener_event(\metastore\FireEventRequest $rqst); + /** + */ + public function flushCache(); + /** + * @param \metastore\GetFileMetadataByExprRequest $req + * @return \metastore\GetFileMetadataByExprResult + */ + public function get_file_metadata_by_expr(\metastore\GetFileMetadataByExprRequest $req); + /** + * @param \metastore\GetFileMetadataRequest $req + * @return \metastore\GetFileMetadataResult + */ + public function get_file_metadata(\metastore\GetFileMetadataRequest $req); + /** + * @param \metastore\PutFileMetadataRequest $req + * @return \metastore\PutFileMetadataResult + */ + public function put_file_metadata(\metastore\PutFileMetadataRequest $req); + /** + * @param \metastore\ClearFileMetadataRequest $req + * @return \metastore\ClearFileMetadataResult + */ + public function clear_file_metadata(\metastore\ClearFileMetadataRequest $req); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -8141,196 +8164,268 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new 
\Exception("fire_listener_event failed: unknown result"); } -} - -// HELPER FUNCTIONS AND STRUCTURES + public function flushCache() + { + $this->send_flushCache(); + $this->recv_flushCache(); + } -class ThriftHiveMetastore_getMetaConf_args { - static $_TSPEC; + public function send_flushCache() + { + $args = new \metastore\ThriftHiveMetastore_flushCache_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'flushCache', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('flushCache', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } - /** - * @var string - */ - public $key = null; + public function recv_flushCache() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_flushCache_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'key', - 'type' => TType::STRING, - ), - ); - } - if (is_array($vals)) { - if (isset($vals['key'])) { - $this->key = $vals['key']; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_flushCache_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + return; } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_args'; + public function get_file_metadata_by_expr(\metastore\GetFileMetadataByExprRequest $req) + { + $this->send_get_file_metadata_by_expr($req); + return $this->recv_get_file_metadata_by_expr(); } - public function read($input) + public function send_get_file_metadata_by_expr(\metastore\GetFileMetadataByExprRequest $req) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_get_file_metadata_by_expr_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->key); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; + thrift_protocol_write_binary($this->output_, 'get_file_metadata_by_expr', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_file_metadata_by_expr', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_file_metadata_by_expr() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if 
($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_file_metadata_by_expr_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } - $xfer += $input->readFieldEnd(); + $result = new \metastore\ThriftHiveMetastore_get_file_metadata_by_expr_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - $xfer += $input->readStructEnd(); - return $xfer; + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("get_file_metadata_by_expr failed: unknown result"); } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); - if ($this->key !== null) { - $xfer += $output->writeFieldBegin('key', TType::STRING, 1); - $xfer += $output->writeString($this->key); - $xfer += $output->writeFieldEnd(); + public function get_file_metadata(\metastore\GetFileMetadataRequest $req) + { + $this->send_get_file_metadata($req); + return $this->recv_get_file_metadata(); + } + + public function send_get_file_metadata(\metastore\GetFileMetadataRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_get_file_metadata_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_file_metadata', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_file_metadata', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; } -} + public function recv_get_file_metadata() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_file_metadata_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; -class ThriftHiveMetastore_getMetaConf_result { - static $_TSPEC; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_file_metadata_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("get_file_metadata failed: unknown result"); + } - /** - * @var string - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; + public function put_file_metadata(\metastore\PutFileMetadataRequest $req) + { + $this->send_put_file_metadata($req); + return $this->recv_put_file_metadata(); + } - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::STRING, - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => 
'\metastore\MetaException', - ), - ); + public function send_put_file_metadata(\metastore\PutFileMetadataRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_put_file_metadata_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'put_file_metadata', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; + else + { + $this->output_->writeMessageBegin('put_file_metadata', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_put_file_metadata() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_put_file_metadata_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_put_file_metadata_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("put_file_metadata failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_result'; + public function clear_file_metadata(\metastore\ClearFileMetadataRequest $req) + { + $this->send_clear_file_metadata($req); + return $this->recv_clear_file_metadata(); } - public function read($input) + public function send_clear_file_metadata(\metastore\ClearFileMetadataRequest $req) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_clear_file_metadata_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); + thrift_protocol_write_binary($this->output_, 'clear_file_metadata', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('clear_file_metadata', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $input->readStructEnd(); - return $xfer; } - public function write($output) { - $xfer = 0; - $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::STRING, 0); - $xfer += $output->writeString($this->success); - $xfer += $output->writeFieldEnd(); + public function recv_clear_file_metadata() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_clear_file_metadata_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_clear_file_metadata_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); + if ($result->success !== null) { + return $result->success; } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; + throw new \Exception("clear_file_metadata failed: unknown result"); } } -class ThriftHiveMetastore_setMetaConf_args { +// HELPER FUNCTIONS AND STRUCTURES + +class ThriftHiveMetastore_getMetaConf_args { static $_TSPEC; /** * @var string */ public $key = null; - /** - * @var string - */ - public $value = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -8339,24 +8434,203 @@ class ThriftHiveMetastore_setMetaConf_args { 'var' => 'key', 'type' => TType::STRING, ), - 2 => array( - 'var' => 'value', - 'type' => TType::STRING, - ), ); } if (is_array($vals)) { if (isset($vals['key'])) { $this->key = $vals['key']; } - if (isset($vals['value'])) { - $this->value = $vals['value']; - } } } public function getName() { - return 'ThriftHiveMetastore_setMetaConf_args'; + return 'ThriftHiveMetastore_getMetaConf_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->key); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); + if ($this->key !== null) { + $xfer += $output->writeFieldBegin('key', TType::STRING, 1); + $xfer += $output->writeString($this->key); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_getMetaConf_result { + static $_TSPEC; + + /** + * @var string + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRING, + ), + 1 => array( + 'var' => 'o1', + 'type' 
=> TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_getMetaConf_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::STRING, 0); + $xfer += $output->writeString($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_setMetaConf_args { + static $_TSPEC; + + /** + * @var string + */ + public $key = null; + /** + * @var string + */ + public $value = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'key', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'value', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['key'])) { + $this->key = $vals['key']; + } + if (isset($vals['value'])) { + $this->value = $vals['value']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_setMetaConf_args'; } public function read($input) @@ -9292,14 +9566,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size472 = 0; - $_etype475 = 0; - $xfer += $input->readListBegin($_etype475, $_size472); - for ($_i476 = 0; $_i476 < $_size472; ++$_i476) + $_size532 = 0; + $_etype535 = 0; + $xfer += $input->readListBegin($_etype535, $_size532); + for ($_i536 = 0; $_i536 < $_size532; ++$_i536) { - $elem477 = null; - $xfer += $input->readString($elem477); - $this->success []= $elem477; + $elem537 = null; + $xfer += $input->readString($elem537); + $this->success []= $elem537; } $xfer += $input->readListEnd(); } else { @@ -9335,9 +9609,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter478) + foreach ($this->success as $iter538) { - $xfer += $output->writeString($iter478); + $xfer += $output->writeString($iter538); } } $output->writeListEnd(); @@ -9468,14 +9742,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size479 = 0; - $_etype482 = 0; - $xfer += $input->readListBegin($_etype482, 
$_size479); - for ($_i483 = 0; $_i483 < $_size479; ++$_i483) + $_size539 = 0; + $_etype542 = 0; + $xfer += $input->readListBegin($_etype542, $_size539); + for ($_i543 = 0; $_i543 < $_size539; ++$_i543) { - $elem484 = null; - $xfer += $input->readString($elem484); - $this->success []= $elem484; + $elem544 = null; + $xfer += $input->readString($elem544); + $this->success []= $elem544; } $xfer += $input->readListEnd(); } else { @@ -9511,9 +9785,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter485) + foreach ($this->success as $iter545) { - $xfer += $output->writeString($iter485); + $xfer += $output->writeString($iter545); } } $output->writeListEnd(); @@ -10514,18 +10788,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size486 = 0; - $_ktype487 = 0; - $_vtype488 = 0; - $xfer += $input->readMapBegin($_ktype487, $_vtype488, $_size486); - for ($_i490 = 0; $_i490 < $_size486; ++$_i490) + $_size546 = 0; + $_ktype547 = 0; + $_vtype548 = 0; + $xfer += $input->readMapBegin($_ktype547, $_vtype548, $_size546); + for ($_i550 = 0; $_i550 < $_size546; ++$_i550) { - $key491 = ''; - $val492 = new \metastore\Type(); - $xfer += $input->readString($key491); - $val492 = new \metastore\Type(); - $xfer += $val492->read($input); - $this->success[$key491] = $val492; + $key551 = ''; + $val552 = new \metastore\Type(); + $xfer += $input->readString($key551); + $val552 = new \metastore\Type(); + $xfer += $val552->read($input); + $this->success[$key551] = $val552; } $xfer += $input->readMapEnd(); } else { @@ -10561,10 +10835,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter493 => $viter494) + foreach ($this->success as $kiter553 => $viter554) { - $xfer += $output->writeString($kiter493); - $xfer += $viter494->write($output); + $xfer += $output->writeString($kiter553); + $xfer += $viter554->write($output); } } $output->writeMapEnd(); @@ -10768,15 +11042,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size495 = 0; - $_etype498 = 0; - $xfer += $input->readListBegin($_etype498, $_size495); - for ($_i499 = 0; $_i499 < $_size495; ++$_i499) + $_size555 = 0; + $_etype558 = 0; + $xfer += $input->readListBegin($_etype558, $_size555); + for ($_i559 = 0; $_i559 < $_size555; ++$_i559) { - $elem500 = null; - $elem500 = new \metastore\FieldSchema(); - $xfer += $elem500->read($input); - $this->success []= $elem500; + $elem560 = null; + $elem560 = new \metastore\FieldSchema(); + $xfer += $elem560->read($input); + $this->success []= $elem560; } $xfer += $input->readListEnd(); } else { @@ -10828,9 +11102,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter501) + foreach ($this->success as $iter561) { - $xfer += $iter501->write($output); + $xfer += $iter561->write($output); } } $output->writeListEnd(); @@ -11072,15 +11346,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size502 = 0; - $_etype505 = 0; - $xfer += $input->readListBegin($_etype505, $_size502); - for ($_i506 = 0; $_i506 < $_size502; ++$_i506) + $_size562 = 0; + $_etype565 = 0; + $xfer += $input->readListBegin($_etype565, 
$_size562); + for ($_i566 = 0; $_i566 < $_size562; ++$_i566) { - $elem507 = null; - $elem507 = new \metastore\FieldSchema(); - $xfer += $elem507->read($input); - $this->success []= $elem507; + $elem567 = null; + $elem567 = new \metastore\FieldSchema(); + $xfer += $elem567->read($input); + $this->success []= $elem567; } $xfer += $input->readListEnd(); } else { @@ -11132,9 +11406,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter508) + foreach ($this->success as $iter568) { - $xfer += $iter508->write($output); + $xfer += $iter568->write($output); } } $output->writeListEnd(); @@ -11348,15 +11622,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size509 = 0; - $_etype512 = 0; - $xfer += $input->readListBegin($_etype512, $_size509); - for ($_i513 = 0; $_i513 < $_size509; ++$_i513) + $_size569 = 0; + $_etype572 = 0; + $xfer += $input->readListBegin($_etype572, $_size569); + for ($_i573 = 0; $_i573 < $_size569; ++$_i573) { - $elem514 = null; - $elem514 = new \metastore\FieldSchema(); - $xfer += $elem514->read($input); - $this->success []= $elem514; + $elem574 = null; + $elem574 = new \metastore\FieldSchema(); + $xfer += $elem574->read($input); + $this->success []= $elem574; } $xfer += $input->readListEnd(); } else { @@ -11408,9 +11682,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter515) + foreach ($this->success as $iter575) { - $xfer += $iter515->write($output); + $xfer += $iter575->write($output); } } $output->writeListEnd(); @@ -11652,15 +11926,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size516 = 0; - $_etype519 = 0; - $xfer += $input->readListBegin($_etype519, $_size516); - for ($_i520 = 0; $_i520 < $_size516; ++$_i520) + $_size576 = 0; + $_etype579 = 0; + $xfer += $input->readListBegin($_etype579, $_size576); + for ($_i580 = 0; $_i580 < $_size576; ++$_i580) { - $elem521 = null; - $elem521 = new \metastore\FieldSchema(); - $xfer += $elem521->read($input); - $this->success []= $elem521; + $elem581 = null; + $elem581 = new \metastore\FieldSchema(); + $xfer += $elem581->read($input); + $this->success []= $elem581; } $xfer += $input->readListEnd(); } else { @@ -11712,9 +11986,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter522) + foreach ($this->success as $iter582) { - $xfer += $iter522->write($output); + $xfer += $iter582->write($output); } } $output->writeListEnd(); @@ -12869,14 +13143,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size523 = 0; - $_etype526 = 0; - $xfer += $input->readListBegin($_etype526, $_size523); - for ($_i527 = 0; $_i527 < $_size523; ++$_i527) + $_size583 = 0; + $_etype586 = 0; + $xfer += $input->readListBegin($_etype586, $_size583); + for ($_i587 = 0; $_i587 < $_size583; ++$_i587) { - $elem528 = null; - $xfer += $input->readString($elem528); - $this->success []= $elem528; + $elem588 = null; + $xfer += $input->readString($elem588); + $this->success []= $elem588; } $xfer += $input->readListEnd(); } else { @@ -12912,9 +13186,9 @@ class ThriftHiveMetastore_get_tables_result { { 
$output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter529) + foreach ($this->success as $iter589) { - $xfer += $output->writeString($iter529); + $xfer += $output->writeString($iter589); } } $output->writeListEnd(); @@ -13070,14 +13344,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size530 = 0; - $_etype533 = 0; - $xfer += $input->readListBegin($_etype533, $_size530); - for ($_i534 = 0; $_i534 < $_size530; ++$_i534) + $_size590 = 0; + $_etype593 = 0; + $xfer += $input->readListBegin($_etype593, $_size590); + for ($_i594 = 0; $_i594 < $_size590; ++$_i594) { - $elem535 = null; - $xfer += $input->readString($elem535); - $this->success []= $elem535; + $elem595 = null; + $xfer += $input->readString($elem595); + $this->success []= $elem595; } $xfer += $input->readListEnd(); } else { @@ -13113,9 +13387,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter536) + foreach ($this->success as $iter596) { - $xfer += $output->writeString($iter536); + $xfer += $output->writeString($iter596); } } $output->writeListEnd(); @@ -13430,14 +13704,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size537 = 0; - $_etype540 = 0; - $xfer += $input->readListBegin($_etype540, $_size537); - for ($_i541 = 0; $_i541 < $_size537; ++$_i541) + $_size597 = 0; + $_etype600 = 0; + $xfer += $input->readListBegin($_etype600, $_size597); + for ($_i601 = 0; $_i601 < $_size597; ++$_i601) { - $elem542 = null; - $xfer += $input->readString($elem542); - $this->tbl_names []= $elem542; + $elem602 = null; + $xfer += $input->readString($elem602); + $this->tbl_names []= $elem602; } $xfer += $input->readListEnd(); } else { @@ -13470,9 +13744,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter543) + foreach ($this->tbl_names as $iter603) { - $xfer += $output->writeString($iter543); + $xfer += $output->writeString($iter603); } } $output->writeListEnd(); @@ -13573,15 +13847,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size544 = 0; - $_etype547 = 0; - $xfer += $input->readListBegin($_etype547, $_size544); - for ($_i548 = 0; $_i548 < $_size544; ++$_i548) + $_size604 = 0; + $_etype607 = 0; + $xfer += $input->readListBegin($_etype607, $_size604); + for ($_i608 = 0; $_i608 < $_size604; ++$_i608) { - $elem549 = null; - $elem549 = new \metastore\Table(); - $xfer += $elem549->read($input); - $this->success []= $elem549; + $elem609 = null; + $elem609 = new \metastore\Table(); + $xfer += $elem609->read($input); + $this->success []= $elem609; } $xfer += $input->readListEnd(); } else { @@ -13633,9 +13907,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter550) + foreach ($this->success as $iter610) { - $xfer += $iter550->write($output); + $xfer += $iter610->write($output); } } $output->writeListEnd(); @@ -13871,14 +14145,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size551 = 0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - 
for ($_i555 = 0; $_i555 < $_size551; ++$_i555) + $_size611 = 0; + $_etype614 = 0; + $xfer += $input->readListBegin($_etype614, $_size611); + for ($_i615 = 0; $_i615 < $_size611; ++$_i615) { - $elem556 = null; - $xfer += $input->readString($elem556); - $this->success []= $elem556; + $elem616 = null; + $xfer += $input->readString($elem616); + $this->success []= $elem616; } $xfer += $input->readListEnd(); } else { @@ -13930,9 +14204,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter557) + foreach ($this->success as $iter617) { - $xfer += $output->writeString($iter557); + $xfer += $output->writeString($iter617); } } $output->writeListEnd(); @@ -15245,15 +15519,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readListBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size618 = 0; + $_etype621 = 0; + $xfer += $input->readListBegin($_etype621, $_size618); + for ($_i622 = 0; $_i622 < $_size618; ++$_i622) { - $elem563 = null; - $elem563 = new \metastore\Partition(); - $xfer += $elem563->read($input); - $this->new_parts []= $elem563; + $elem623 = null; + $elem623 = new \metastore\Partition(); + $xfer += $elem623->read($input); + $this->new_parts []= $elem623; } $xfer += $input->readListEnd(); } else { @@ -15281,9 +15555,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter564) + foreach ($this->new_parts as $iter624) { - $xfer += $iter564->write($output); + $xfer += $iter624->write($output); } } $output->writeListEnd(); @@ -15498,15 +15772,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size565 = 0; - $_etype568 = 0; - $xfer += $input->readListBegin($_etype568, $_size565); - for ($_i569 = 0; $_i569 < $_size565; ++$_i569) + $_size625 = 0; + $_etype628 = 0; + $xfer += $input->readListBegin($_etype628, $_size625); + for ($_i629 = 0; $_i629 < $_size625; ++$_i629) { - $elem570 = null; - $elem570 = new \metastore\PartitionSpec(); - $xfer += $elem570->read($input); - $this->new_parts []= $elem570; + $elem630 = null; + $elem630 = new \metastore\PartitionSpec(); + $xfer += $elem630->read($input); + $this->new_parts []= $elem630; } $xfer += $input->readListEnd(); } else { @@ -15534,9 +15808,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter571) + foreach ($this->new_parts as $iter631) { - $xfer += $iter571->write($output); + $xfer += $iter631->write($output); } } $output->writeListEnd(); @@ -15786,14 +16060,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size572 = 0; - $_etype575 = 0; - $xfer += $input->readListBegin($_etype575, $_size572); - for ($_i576 = 0; $_i576 < $_size572; ++$_i576) + $_size632 = 0; + $_etype635 = 0; + $xfer += $input->readListBegin($_etype635, $_size632); + for ($_i636 = 0; $_i636 < $_size632; ++$_i636) { - $elem577 = null; - $xfer += $input->readString($elem577); - $this->part_vals []= $elem577; + $elem637 = null; + $xfer += $input->readString($elem637); + $this->part_vals []= $elem637; } $xfer += $input->readListEnd(); } else { @@ -15831,9 +16105,9 
@@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter578) + foreach ($this->part_vals as $iter638) { - $xfer += $output->writeString($iter578); + $xfer += $output->writeString($iter638); } } $output->writeListEnd(); @@ -16335,14 +16609,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size579 = 0; - $_etype582 = 0; - $xfer += $input->readListBegin($_etype582, $_size579); - for ($_i583 = 0; $_i583 < $_size579; ++$_i583) + $_size639 = 0; + $_etype642 = 0; + $xfer += $input->readListBegin($_etype642, $_size639); + for ($_i643 = 0; $_i643 < $_size639; ++$_i643) { - $elem584 = null; - $xfer += $input->readString($elem584); - $this->part_vals []= $elem584; + $elem644 = null; + $xfer += $input->readString($elem644); + $this->part_vals []= $elem644; } $xfer += $input->readListEnd(); } else { @@ -16388,9 +16662,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter585) + foreach ($this->part_vals as $iter645) { - $xfer += $output->writeString($iter585); + $xfer += $output->writeString($iter645); } } $output->writeListEnd(); @@ -17244,14 +17518,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size586 = 0; - $_etype589 = 0; - $xfer += $input->readListBegin($_etype589, $_size586); - for ($_i590 = 0; $_i590 < $_size586; ++$_i590) + $_size646 = 0; + $_etype649 = 0; + $xfer += $input->readListBegin($_etype649, $_size646); + for ($_i650 = 0; $_i650 < $_size646; ++$_i650) { - $elem591 = null; - $xfer += $input->readString($elem591); - $this->part_vals []= $elem591; + $elem651 = null; + $xfer += $input->readString($elem651); + $this->part_vals []= $elem651; } $xfer += $input->readListEnd(); } else { @@ -17296,9 +17570,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter592) + foreach ($this->part_vals as $iter652) { - $xfer += $output->writeString($iter592); + $xfer += $output->writeString($iter652); } } $output->writeListEnd(); @@ -17551,14 +17825,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size593 = 0; - $_etype596 = 0; - $xfer += $input->readListBegin($_etype596, $_size593); - for ($_i597 = 0; $_i597 < $_size593; ++$_i597) + $_size653 = 0; + $_etype656 = 0; + $xfer += $input->readListBegin($_etype656, $_size653); + for ($_i657 = 0; $_i657 < $_size653; ++$_i657) { - $elem598 = null; - $xfer += $input->readString($elem598); - $this->part_vals []= $elem598; + $elem658 = null; + $xfer += $input->readString($elem658); + $this->part_vals []= $elem658; } $xfer += $input->readListEnd(); } else { @@ -17611,9 +17885,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter599) + foreach ($this->part_vals as $iter659) { - $xfer += $output->writeString($iter599); + $xfer += $output->writeString($iter659); } } $output->writeListEnd(); @@ -18627,14 +18901,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size600 = 0; - 
$_etype603 = 0; - $xfer += $input->readListBegin($_etype603, $_size600); - for ($_i604 = 0; $_i604 < $_size600; ++$_i604) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem605 = null; - $xfer += $input->readString($elem605); - $this->part_vals []= $elem605; + $elem665 = null; + $xfer += $input->readString($elem665); + $this->part_vals []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -18672,9 +18946,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter606) + foreach ($this->part_vals as $iter666) { - $xfer += $output->writeString($iter606); + $xfer += $output->writeString($iter666); } } $output->writeListEnd(); @@ -18916,17 +19190,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size607 = 0; - $_ktype608 = 0; - $_vtype609 = 0; - $xfer += $input->readMapBegin($_ktype608, $_vtype609, $_size607); - for ($_i611 = 0; $_i611 < $_size607; ++$_i611) + $_size667 = 0; + $_ktype668 = 0; + $_vtype669 = 0; + $xfer += $input->readMapBegin($_ktype668, $_vtype669, $_size667); + for ($_i671 = 0; $_i671 < $_size667; ++$_i671) { - $key612 = ''; - $val613 = ''; - $xfer += $input->readString($key612); - $xfer += $input->readString($val613); - $this->partitionSpecs[$key612] = $val613; + $key672 = ''; + $val673 = ''; + $xfer += $input->readString($key672); + $xfer += $input->readString($val673); + $this->partitionSpecs[$key672] = $val673; } $xfer += $input->readMapEnd(); } else { @@ -18982,10 +19256,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter614 => $viter615) + foreach ($this->partitionSpecs as $kiter674 => $viter675) { - $xfer += $output->writeString($kiter614); - $xfer += $output->writeString($viter615); + $xfer += $output->writeString($kiter674); + $xfer += $output->writeString($viter675); } } $output->writeMapEnd(); @@ -19311,14 +19585,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size616 = 0; - $_etype619 = 0; - $xfer += $input->readListBegin($_etype619, $_size616); - for ($_i620 = 0; $_i620 < $_size616; ++$_i620) + $_size676 = 0; + $_etype679 = 0; + $xfer += $input->readListBegin($_etype679, $_size676); + for ($_i680 = 0; $_i680 < $_size676; ++$_i680) { - $elem621 = null; - $xfer += $input->readString($elem621); - $this->part_vals []= $elem621; + $elem681 = null; + $xfer += $input->readString($elem681); + $this->part_vals []= $elem681; } $xfer += $input->readListEnd(); } else { @@ -19335,14 +19609,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size622 = 0; - $_etype625 = 0; - $xfer += $input->readListBegin($_etype625, $_size622); - for ($_i626 = 0; $_i626 < $_size622; ++$_i626) + $_size682 = 0; + $_etype685 = 0; + $xfer += $input->readListBegin($_etype685, $_size682); + for ($_i686 = 0; $_i686 < $_size682; ++$_i686) { - $elem627 = null; - $xfer += $input->readString($elem627); - $this->group_names []= $elem627; + $elem687 = null; + $xfer += $input->readString($elem687); + $this->group_names []= $elem687; } $xfer += $input->readListEnd(); } else { @@ -19380,9 +19654,9 @@ class 
ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter628) + foreach ($this->part_vals as $iter688) { - $xfer += $output->writeString($iter628); + $xfer += $output->writeString($iter688); } } $output->writeListEnd(); @@ -19402,9 +19676,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter629) + foreach ($this->group_names as $iter689) { - $xfer += $output->writeString($iter629); + $xfer += $output->writeString($iter689); } } $output->writeListEnd(); @@ -19995,15 +20269,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size630 = 0; - $_etype633 = 0; - $xfer += $input->readListBegin($_etype633, $_size630); - for ($_i634 = 0; $_i634 < $_size630; ++$_i634) + $_size690 = 0; + $_etype693 = 0; + $xfer += $input->readListBegin($_etype693, $_size690); + for ($_i694 = 0; $_i694 < $_size690; ++$_i694) { - $elem635 = null; - $elem635 = new \metastore\Partition(); - $xfer += $elem635->read($input); - $this->success []= $elem635; + $elem695 = null; + $elem695 = new \metastore\Partition(); + $xfer += $elem695->read($input); + $this->success []= $elem695; } $xfer += $input->readListEnd(); } else { @@ -20047,9 +20321,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter636) + foreach ($this->success as $iter696) { - $xfer += $iter636->write($output); + $xfer += $iter696->write($output); } } $output->writeListEnd(); @@ -20195,14 +20469,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size637 = 0; - $_etype640 = 0; - $xfer += $input->readListBegin($_etype640, $_size637); - for ($_i641 = 0; $_i641 < $_size637; ++$_i641) + $_size697 = 0; + $_etype700 = 0; + $xfer += $input->readListBegin($_etype700, $_size697); + for ($_i701 = 0; $_i701 < $_size697; ++$_i701) { - $elem642 = null; - $xfer += $input->readString($elem642); - $this->group_names []= $elem642; + $elem702 = null; + $xfer += $input->readString($elem702); + $this->group_names []= $elem702; } $xfer += $input->readListEnd(); } else { @@ -20250,9 +20524,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter643) + foreach ($this->group_names as $iter703) { - $xfer += $output->writeString($iter643); + $xfer += $output->writeString($iter703); } } $output->writeListEnd(); @@ -20341,15 +20615,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size644 = 0; - $_etype647 = 0; - $xfer += $input->readListBegin($_etype647, $_size644); - for ($_i648 = 0; $_i648 < $_size644; ++$_i648) + $_size704 = 0; + $_etype707 = 0; + $xfer += $input->readListBegin($_etype707, $_size704); + for ($_i708 = 0; $_i708 < $_size704; ++$_i708) { - $elem649 = null; - $elem649 = new \metastore\Partition(); - $xfer += $elem649->read($input); - $this->success []= $elem649; + $elem709 = null; + $elem709 = new \metastore\Partition(); + $xfer += $elem709->read($input); + $this->success []= $elem709; } $xfer += $input->readListEnd(); } else { @@ -20393,9 +20667,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { 
$output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter650) + foreach ($this->success as $iter710) { - $xfer += $iter650->write($output); + $xfer += $iter710->write($output); } } $output->writeListEnd(); @@ -20615,15 +20889,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size651 = 0; - $_etype654 = 0; - $xfer += $input->readListBegin($_etype654, $_size651); - for ($_i655 = 0; $_i655 < $_size651; ++$_i655) + $_size711 = 0; + $_etype714 = 0; + $xfer += $input->readListBegin($_etype714, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $elem656 = null; - $elem656 = new \metastore\PartitionSpec(); - $xfer += $elem656->read($input); - $this->success []= $elem656; + $elem716 = null; + $elem716 = new \metastore\PartitionSpec(); + $xfer += $elem716->read($input); + $this->success []= $elem716; } $xfer += $input->readListEnd(); } else { @@ -20667,9 +20941,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter657) + foreach ($this->success as $iter717) { - $xfer += $iter657->write($output); + $xfer += $iter717->write($output); } } $output->writeListEnd(); @@ -20876,14 +21150,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size658 = 0; - $_etype661 = 0; - $xfer += $input->readListBegin($_etype661, $_size658); - for ($_i662 = 0; $_i662 < $_size658; ++$_i662) + $_size718 = 0; + $_etype721 = 0; + $xfer += $input->readListBegin($_etype721, $_size718); + for ($_i722 = 0; $_i722 < $_size718; ++$_i722) { - $elem663 = null; - $xfer += $input->readString($elem663); - $this->success []= $elem663; + $elem723 = null; + $xfer += $input->readString($elem723); + $this->success []= $elem723; } $xfer += $input->readListEnd(); } else { @@ -20919,9 +21193,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter664) + foreach ($this->success as $iter724) { - $xfer += $output->writeString($iter664); + $xfer += $output->writeString($iter724); } } $output->writeListEnd(); @@ -21037,14 +21311,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size665 = 0; - $_etype668 = 0; - $xfer += $input->readListBegin($_etype668, $_size665); - for ($_i669 = 0; $_i669 < $_size665; ++$_i669) + $_size725 = 0; + $_etype728 = 0; + $xfer += $input->readListBegin($_etype728, $_size725); + for ($_i729 = 0; $_i729 < $_size725; ++$_i729) { - $elem670 = null; - $xfer += $input->readString($elem670); - $this->part_vals []= $elem670; + $elem730 = null; + $xfer += $input->readString($elem730); + $this->part_vals []= $elem730; } $xfer += $input->readListEnd(); } else { @@ -21089,9 +21363,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter671) + foreach ($this->part_vals as $iter731) { - $xfer += $output->writeString($iter671); + $xfer += $output->writeString($iter731); } } $output->writeListEnd(); @@ -21185,15 +21459,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size672 = 0; - $_etype675 = 0; - $xfer += $input->readListBegin($_etype675, $_size672); - for ($_i676 = 0; 
$_i676 < $_size672; ++$_i676) + $_size732 = 0; + $_etype735 = 0; + $xfer += $input->readListBegin($_etype735, $_size732); + for ($_i736 = 0; $_i736 < $_size732; ++$_i736) { - $elem677 = null; - $elem677 = new \metastore\Partition(); - $xfer += $elem677->read($input); - $this->success []= $elem677; + $elem737 = null; + $elem737 = new \metastore\Partition(); + $xfer += $elem737->read($input); + $this->success []= $elem737; } $xfer += $input->readListEnd(); } else { @@ -21237,9 +21511,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter678) + foreach ($this->success as $iter738) { - $xfer += $iter678->write($output); + $xfer += $iter738->write($output); } } $output->writeListEnd(); @@ -21386,14 +21660,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size679 = 0; - $_etype682 = 0; - $xfer += $input->readListBegin($_etype682, $_size679); - for ($_i683 = 0; $_i683 < $_size679; ++$_i683) + $_size739 = 0; + $_etype742 = 0; + $xfer += $input->readListBegin($_etype742, $_size739); + for ($_i743 = 0; $_i743 < $_size739; ++$_i743) { - $elem684 = null; - $xfer += $input->readString($elem684); - $this->part_vals []= $elem684; + $elem744 = null; + $xfer += $input->readString($elem744); + $this->part_vals []= $elem744; } $xfer += $input->readListEnd(); } else { @@ -21417,14 +21691,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size685 = 0; - $_etype688 = 0; - $xfer += $input->readListBegin($_etype688, $_size685); - for ($_i689 = 0; $_i689 < $_size685; ++$_i689) + $_size745 = 0; + $_etype748 = 0; + $xfer += $input->readListBegin($_etype748, $_size745); + for ($_i749 = 0; $_i749 < $_size745; ++$_i749) { - $elem690 = null; - $xfer += $input->readString($elem690); - $this->group_names []= $elem690; + $elem750 = null; + $xfer += $input->readString($elem750); + $this->group_names []= $elem750; } $xfer += $input->readListEnd(); } else { @@ -21462,9 +21736,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter691) + foreach ($this->part_vals as $iter751) { - $xfer += $output->writeString($iter691); + $xfer += $output->writeString($iter751); } } $output->writeListEnd(); @@ -21489,9 +21763,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter692) + foreach ($this->group_names as $iter752) { - $xfer += $output->writeString($iter692); + $xfer += $output->writeString($iter752); } } $output->writeListEnd(); @@ -21580,15 +21854,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size693 = 0; - $_etype696 = 0; - $xfer += $input->readListBegin($_etype696, $_size693); - for ($_i697 = 0; $_i697 < $_size693; ++$_i697) + $_size753 = 0; + $_etype756 = 0; + $xfer += $input->readListBegin($_etype756, $_size753); + for ($_i757 = 0; $_i757 < $_size753; ++$_i757) { - $elem698 = null; - $elem698 = new \metastore\Partition(); - $xfer += $elem698->read($input); - $this->success []= $elem698; + $elem758 = null; + $elem758 = new \metastore\Partition(); + $xfer += $elem758->read($input); + $this->success []= $elem758; } $xfer += 
$input->readListEnd(); } else { @@ -21632,9 +21906,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter699) + foreach ($this->success as $iter759) { - $xfer += $iter699->write($output); + $xfer += $iter759->write($output); } } $output->writeListEnd(); @@ -21755,14 +22029,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size700 = 0; - $_etype703 = 0; - $xfer += $input->readListBegin($_etype703, $_size700); - for ($_i704 = 0; $_i704 < $_size700; ++$_i704) + $_size760 = 0; + $_etype763 = 0; + $xfer += $input->readListBegin($_etype763, $_size760); + for ($_i764 = 0; $_i764 < $_size760; ++$_i764) { - $elem705 = null; - $xfer += $input->readString($elem705); - $this->part_vals []= $elem705; + $elem765 = null; + $xfer += $input->readString($elem765); + $this->part_vals []= $elem765; } $xfer += $input->readListEnd(); } else { @@ -21807,9 +22081,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter706) + foreach ($this->part_vals as $iter766) { - $xfer += $output->writeString($iter706); + $xfer += $output->writeString($iter766); } } $output->writeListEnd(); @@ -21902,14 +22176,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size707 = 0; - $_etype710 = 0; - $xfer += $input->readListBegin($_etype710, $_size707); - for ($_i711 = 0; $_i711 < $_size707; ++$_i711) + $_size767 = 0; + $_etype770 = 0; + $xfer += $input->readListBegin($_etype770, $_size767); + for ($_i771 = 0; $_i771 < $_size767; ++$_i771) { - $elem712 = null; - $xfer += $input->readString($elem712); - $this->success []= $elem712; + $elem772 = null; + $xfer += $input->readString($elem772); + $this->success []= $elem772; } $xfer += $input->readListEnd(); } else { @@ -21953,9 +22227,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter713) + foreach ($this->success as $iter773) { - $xfer += $output->writeString($iter713); + $xfer += $output->writeString($iter773); } } $output->writeListEnd(); @@ -22198,15 +22472,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size714 = 0; - $_etype717 = 0; - $xfer += $input->readListBegin($_etype717, $_size714); - for ($_i718 = 0; $_i718 < $_size714; ++$_i718) + $_size774 = 0; + $_etype777 = 0; + $xfer += $input->readListBegin($_etype777, $_size774); + for ($_i778 = 0; $_i778 < $_size774; ++$_i778) { - $elem719 = null; - $elem719 = new \metastore\Partition(); - $xfer += $elem719->read($input); - $this->success []= $elem719; + $elem779 = null; + $elem779 = new \metastore\Partition(); + $xfer += $elem779->read($input); + $this->success []= $elem779; } $xfer += $input->readListEnd(); } else { @@ -22250,9 +22524,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter720) + foreach ($this->success as $iter780) { - $xfer += $iter720->write($output); + $xfer += $iter780->write($output); } } $output->writeListEnd(); @@ -22495,15 +22769,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == 
TType::LST) { $this->success = array(); - $_size721 = 0; - $_etype724 = 0; - $xfer += $input->readListBegin($_etype724, $_size721); - for ($_i725 = 0; $_i725 < $_size721; ++$_i725) + $_size781 = 0; + $_etype784 = 0; + $xfer += $input->readListBegin($_etype784, $_size781); + for ($_i785 = 0; $_i785 < $_size781; ++$_i785) { - $elem726 = null; - $elem726 = new \metastore\PartitionSpec(); - $xfer += $elem726->read($input); - $this->success []= $elem726; + $elem786 = null; + $elem786 = new \metastore\PartitionSpec(); + $xfer += $elem786->read($input); + $this->success []= $elem786; } $xfer += $input->readListEnd(); } else { @@ -22547,9 +22821,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter727) + foreach ($this->success as $iter787) { - $xfer += $iter727->write($output); + $xfer += $iter787->write($output); } } $output->writeListEnd(); @@ -22869,14 +23143,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size728 = 0; - $_etype731 = 0; - $xfer += $input->readListBegin($_etype731, $_size728); - for ($_i732 = 0; $_i732 < $_size728; ++$_i732) + $_size788 = 0; + $_etype791 = 0; + $xfer += $input->readListBegin($_etype791, $_size788); + for ($_i792 = 0; $_i792 < $_size788; ++$_i792) { - $elem733 = null; - $xfer += $input->readString($elem733); - $this->names []= $elem733; + $elem793 = null; + $xfer += $input->readString($elem793); + $this->names []= $elem793; } $xfer += $input->readListEnd(); } else { @@ -22914,9 +23188,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter734) + foreach ($this->names as $iter794) { - $xfer += $output->writeString($iter734); + $xfer += $output->writeString($iter794); } } $output->writeListEnd(); @@ -23005,15 +23279,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size735 = 0; - $_etype738 = 0; - $xfer += $input->readListBegin($_etype738, $_size735); - for ($_i739 = 0; $_i739 < $_size735; ++$_i739) + $_size795 = 0; + $_etype798 = 0; + $xfer += $input->readListBegin($_etype798, $_size795); + for ($_i799 = 0; $_i799 < $_size795; ++$_i799) { - $elem740 = null; - $elem740 = new \metastore\Partition(); - $xfer += $elem740->read($input); - $this->success []= $elem740; + $elem800 = null; + $elem800 = new \metastore\Partition(); + $xfer += $elem800->read($input); + $this->success []= $elem800; } $xfer += $input->readListEnd(); } else { @@ -23057,9 +23331,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter741) + foreach ($this->success as $iter801) { - $xfer += $iter741->write($output); + $xfer += $iter801->write($output); } } $output->writeListEnd(); @@ -23398,15 +23672,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size742 = 0; - $_etype745 = 0; - $xfer += $input->readListBegin($_etype745, $_size742); - for ($_i746 = 0; $_i746 < $_size742; ++$_i746) + $_size802 = 0; + $_etype805 = 0; + $xfer += $input->readListBegin($_etype805, $_size802); + for ($_i806 = 0; $_i806 < $_size802; ++$_i806) { - $elem747 = null; - $elem747 = new \metastore\Partition(); - $xfer += $elem747->read($input); - $this->new_parts []= 
$elem747; + $elem807 = null; + $elem807 = new \metastore\Partition(); + $xfer += $elem807->read($input); + $this->new_parts []= $elem807; } $xfer += $input->readListEnd(); } else { @@ -23444,9 +23718,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter748) + foreach ($this->new_parts as $iter808) { - $xfer += $iter748->write($output); + $xfer += $iter808->write($output); } } $output->writeListEnd(); @@ -23916,14 +24190,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size749 = 0; - $_etype752 = 0; - $xfer += $input->readListBegin($_etype752, $_size749); - for ($_i753 = 0; $_i753 < $_size749; ++$_i753) + $_size809 = 0; + $_etype812 = 0; + $xfer += $input->readListBegin($_etype812, $_size809); + for ($_i813 = 0; $_i813 < $_size809; ++$_i813) { - $elem754 = null; - $xfer += $input->readString($elem754); - $this->part_vals []= $elem754; + $elem814 = null; + $xfer += $input->readString($elem814); + $this->part_vals []= $elem814; } $xfer += $input->readListEnd(); } else { @@ -23969,9 +24243,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter755) + foreach ($this->part_vals as $iter815) { - $xfer += $output->writeString($iter755); + $xfer += $output->writeString($iter815); } } $output->writeListEnd(); @@ -24156,14 +24430,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size756 = 0; - $_etype759 = 0; - $xfer += $input->readListBegin($_etype759, $_size756); - for ($_i760 = 0; $_i760 < $_size756; ++$_i760) + $_size816 = 0; + $_etype819 = 0; + $xfer += $input->readListBegin($_etype819, $_size816); + for ($_i820 = 0; $_i820 < $_size816; ++$_i820) { - $elem761 = null; - $xfer += $input->readString($elem761); - $this->part_vals []= $elem761; + $elem821 = null; + $xfer += $input->readString($elem821); + $this->part_vals []= $elem821; } $xfer += $input->readListEnd(); } else { @@ -24198,9 +24472,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter762) + foreach ($this->part_vals as $iter822) { - $xfer += $output->writeString($iter762); + $xfer += $output->writeString($iter822); } } $output->writeListEnd(); @@ -24654,14 +24928,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size823 = 0; + $_etype826 = 0; + $xfer += $input->readListBegin($_etype826, $_size823); + for ($_i827 = 0; $_i827 < $_size823; ++$_i827) { - $elem768 = null; - $xfer += $input->readString($elem768); - $this->success []= $elem768; + $elem828 = null; + $xfer += $input->readString($elem828); + $this->success []= $elem828; } $xfer += $input->readListEnd(); } else { @@ -24697,9 +24971,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter769) + foreach ($this->success as $iter829) { - $xfer += $output->writeString($iter769); + $xfer += $output->writeString($iter829); } } $output->writeListEnd(); @@ -24859,17 
+25133,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size770 = 0; - $_ktype771 = 0; - $_vtype772 = 0; - $xfer += $input->readMapBegin($_ktype771, $_vtype772, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size830 = 0; + $_ktype831 = 0; + $_vtype832 = 0; + $xfer += $input->readMapBegin($_ktype831, $_vtype832, $_size830); + for ($_i834 = 0; $_i834 < $_size830; ++$_i834) { - $key775 = ''; - $val776 = ''; - $xfer += $input->readString($key775); - $xfer += $input->readString($val776); - $this->success[$key775] = $val776; + $key835 = ''; + $val836 = ''; + $xfer += $input->readString($key835); + $xfer += $input->readString($val836); + $this->success[$key835] = $val836; } $xfer += $input->readMapEnd(); } else { @@ -24905,10 +25179,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter777 => $viter778) + foreach ($this->success as $kiter837 => $viter838) { - $xfer += $output->writeString($kiter777); - $xfer += $output->writeString($viter778); + $xfer += $output->writeString($kiter837); + $xfer += $output->writeString($viter838); } } $output->writeMapEnd(); @@ -25028,17 +25302,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size779 = 0; - $_ktype780 = 0; - $_vtype781 = 0; - $xfer += $input->readMapBegin($_ktype780, $_vtype781, $_size779); - for ($_i783 = 0; $_i783 < $_size779; ++$_i783) + $_size839 = 0; + $_ktype840 = 0; + $_vtype841 = 0; + $xfer += $input->readMapBegin($_ktype840, $_vtype841, $_size839); + for ($_i843 = 0; $_i843 < $_size839; ++$_i843) { - $key784 = ''; - $val785 = ''; - $xfer += $input->readString($key784); - $xfer += $input->readString($val785); - $this->part_vals[$key784] = $val785; + $key844 = ''; + $val845 = ''; + $xfer += $input->readString($key844); + $xfer += $input->readString($val845); + $this->part_vals[$key844] = $val845; } $xfer += $input->readMapEnd(); } else { @@ -25083,10 +25357,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter786 => $viter787) + foreach ($this->part_vals as $kiter846 => $viter847) { - $xfer += $output->writeString($kiter786); - $xfer += $output->writeString($viter787); + $xfer += $output->writeString($kiter846); + $xfer += $output->writeString($viter847); } } $output->writeMapEnd(); @@ -25408,17 +25682,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size788 = 0; - $_ktype789 = 0; - $_vtype790 = 0; - $xfer += $input->readMapBegin($_ktype789, $_vtype790, $_size788); - for ($_i792 = 0; $_i792 < $_size788; ++$_i792) + $_size848 = 0; + $_ktype849 = 0; + $_vtype850 = 0; + $xfer += $input->readMapBegin($_ktype849, $_vtype850, $_size848); + for ($_i852 = 0; $_i852 < $_size848; ++$_i852) { - $key793 = ''; - $val794 = ''; - $xfer += $input->readString($key793); - $xfer += $input->readString($val794); - $this->part_vals[$key793] = $val794; + $key853 = ''; + $val854 = ''; + $xfer += $input->readString($key853); + $xfer += $input->readString($val854); + $this->part_vals[$key853] = $val854; } $xfer += $input->readMapEnd(); } else { @@ -25463,10 +25737,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { 
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter795 => $viter796) + foreach ($this->part_vals as $kiter855 => $viter856) { - $xfer += $output->writeString($kiter795); - $xfer += $output->writeString($viter796); + $xfer += $output->writeString($kiter855); + $xfer += $output->writeString($viter856); } } $output->writeMapEnd(); @@ -26940,15 +27214,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size797 = 0; - $_etype800 = 0; - $xfer += $input->readListBegin($_etype800, $_size797); - for ($_i801 = 0; $_i801 < $_size797; ++$_i801) + $_size857 = 0; + $_etype860 = 0; + $xfer += $input->readListBegin($_etype860, $_size857); + for ($_i861 = 0; $_i861 < $_size857; ++$_i861) { - $elem802 = null; - $elem802 = new \metastore\Index(); - $xfer += $elem802->read($input); - $this->success []= $elem802; + $elem862 = null; + $elem862 = new \metastore\Index(); + $xfer += $elem862->read($input); + $this->success []= $elem862; } $xfer += $input->readListEnd(); } else { @@ -26992,9 +27266,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter803) + foreach ($this->success as $iter863) { - $xfer += $iter803->write($output); + $xfer += $iter863->write($output); } } $output->writeListEnd(); @@ -27201,14 +27475,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size804 = 0; - $_etype807 = 0; - $xfer += $input->readListBegin($_etype807, $_size804); - for ($_i808 = 0; $_i808 < $_size804; ++$_i808) + $_size864 = 0; + $_etype867 = 0; + $xfer += $input->readListBegin($_etype867, $_size864); + for ($_i868 = 0; $_i868 < $_size864; ++$_i868) { - $elem809 = null; - $xfer += $input->readString($elem809); - $this->success []= $elem809; + $elem869 = null; + $xfer += $input->readString($elem869); + $this->success []= $elem869; } $xfer += $input->readListEnd(); } else { @@ -27244,9 +27518,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter810) + foreach ($this->success as $iter870) { - $xfer += $output->writeString($iter810); + $xfer += $output->writeString($iter870); } } $output->writeListEnd(); @@ -30720,14 +30994,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size811 = 0; - $_etype814 = 0; - $xfer += $input->readListBegin($_etype814, $_size811); - for ($_i815 = 0; $_i815 < $_size811; ++$_i815) + $_size871 = 0; + $_etype874 = 0; + $xfer += $input->readListBegin($_etype874, $_size871); + for ($_i875 = 0; $_i875 < $_size871; ++$_i875) { - $elem816 = null; - $xfer += $input->readString($elem816); - $this->success []= $elem816; + $elem876 = null; + $xfer += $input->readString($elem876); + $this->success []= $elem876; } $xfer += $input->readListEnd(); } else { @@ -30763,9 +31037,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter817) + foreach ($this->success as $iter877) { - $xfer += $output->writeString($iter817); + $xfer += $output->writeString($iter877); } } $output->writeListEnd(); @@ -31634,14 +31908,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size818 = 0; - 
$_etype821 = 0; - $xfer += $input->readListBegin($_etype821, $_size818); - for ($_i822 = 0; $_i822 < $_size818; ++$_i822) + $_size878 = 0; + $_etype881 = 0; + $xfer += $input->readListBegin($_etype881, $_size878); + for ($_i882 = 0; $_i882 < $_size878; ++$_i882) { - $elem823 = null; - $xfer += $input->readString($elem823); - $this->success []= $elem823; + $elem883 = null; + $xfer += $input->readString($elem883); + $this->success []= $elem883; } $xfer += $input->readListEnd(); } else { @@ -31677,9 +31951,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter824) + foreach ($this->success as $iter884) { - $xfer += $output->writeString($iter824); + $xfer += $output->writeString($iter884); } } $output->writeListEnd(); @@ -32370,15 +32644,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size825 = 0; - $_etype828 = 0; - $xfer += $input->readListBegin($_etype828, $_size825); - for ($_i829 = 0; $_i829 < $_size825; ++$_i829) + $_size885 = 0; + $_etype888 = 0; + $xfer += $input->readListBegin($_etype888, $_size885); + for ($_i889 = 0; $_i889 < $_size885; ++$_i889) { - $elem830 = null; - $elem830 = new \metastore\Role(); - $xfer += $elem830->read($input); - $this->success []= $elem830; + $elem890 = null; + $elem890 = new \metastore\Role(); + $xfer += $elem890->read($input); + $this->success []= $elem890; } $xfer += $input->readListEnd(); } else { @@ -32414,9 +32688,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter831) + foreach ($this->success as $iter891) { - $xfer += $iter831->write($output); + $xfer += $iter891->write($output); } } $output->writeListEnd(); @@ -33078,14 +33352,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size832 = 0; - $_etype835 = 0; - $xfer += $input->readListBegin($_etype835, $_size832); - for ($_i836 = 0; $_i836 < $_size832; ++$_i836) + $_size892 = 0; + $_etype895 = 0; + $xfer += $input->readListBegin($_etype895, $_size892); + for ($_i896 = 0; $_i896 < $_size892; ++$_i896) { - $elem837 = null; - $xfer += $input->readString($elem837); - $this->group_names []= $elem837; + $elem897 = null; + $xfer += $input->readString($elem897); + $this->group_names []= $elem897; } $xfer += $input->readListEnd(); } else { @@ -33126,9 +33400,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter838) + foreach ($this->group_names as $iter898) { - $xfer += $output->writeString($iter838); + $xfer += $output->writeString($iter898); } } $output->writeListEnd(); @@ -33436,15 +33710,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size839 = 0; - $_etype842 = 0; - $xfer += $input->readListBegin($_etype842, $_size839); - for ($_i843 = 0; $_i843 < $_size839; ++$_i843) + $_size899 = 0; + $_etype902 = 0; + $xfer += $input->readListBegin($_etype902, $_size899); + for ($_i903 = 0; $_i903 < $_size899; ++$_i903) { - $elem844 = null; - $elem844 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem844->read($input); - $this->success []= $elem844; + $elem904 = null; + $elem904 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem904->read($input); + $this->success []= $elem904; } 
$xfer += $input->readListEnd(); } else { @@ -33480,9 +33754,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter845) + foreach ($this->success as $iter905) { - $xfer += $iter845->write($output); + $xfer += $iter905->write($output); } } $output->writeListEnd(); @@ -34114,14 +34388,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size846 = 0; - $_etype849 = 0; - $xfer += $input->readListBegin($_etype849, $_size846); - for ($_i850 = 0; $_i850 < $_size846; ++$_i850) + $_size906 = 0; + $_etype909 = 0; + $xfer += $input->readListBegin($_etype909, $_size906); + for ($_i910 = 0; $_i910 < $_size906; ++$_i910) { - $elem851 = null; - $xfer += $input->readString($elem851); - $this->group_names []= $elem851; + $elem911 = null; + $xfer += $input->readString($elem911); + $this->group_names []= $elem911; } $xfer += $input->readListEnd(); } else { @@ -34154,9 +34428,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter852) + foreach ($this->group_names as $iter912) { - $xfer += $output->writeString($iter852); + $xfer += $output->writeString($iter912); } } $output->writeListEnd(); @@ -34232,14 +34506,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size853 = 0; - $_etype856 = 0; - $xfer += $input->readListBegin($_etype856, $_size853); - for ($_i857 = 0; $_i857 < $_size853; ++$_i857) + $_size913 = 0; + $_etype916 = 0; + $xfer += $input->readListBegin($_etype916, $_size913); + for ($_i917 = 0; $_i917 < $_size913; ++$_i917) { - $elem858 = null; - $xfer += $input->readString($elem858); - $this->success []= $elem858; + $elem918 = null; + $xfer += $input->readString($elem918); + $this->success []= $elem918; } $xfer += $input->readListEnd(); } else { @@ -34275,9 +34549,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter859) + foreach ($this->success as $iter919) { - $xfer += $output->writeString($iter859); + $xfer += $output->writeString($iter919); } } $output->writeListEnd(); @@ -36856,7 +37130,849 @@ class ThriftHiveMetastore_compact_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_compact_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_compact_result'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_show_compact_args { + static $_TSPEC; + + /** + * @var \metastore\ShowCompactRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\ShowCompactRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_show_compact_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == 
TType::STRUCT) { + $this->rqst = new \metastore\ShowCompactRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_show_compact_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_show_compact_result { + static $_TSPEC; + + /** + * @var \metastore\ShowCompactResponse + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\ShowCompactResponse', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_show_compact_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\ShowCompactResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_show_compact_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_add_dynamic_partitions_args { + static $_TSPEC; + + /** + * @var \metastore\AddDynamicPartitions + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\AddDynamicPartitions', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_dynamic_partitions_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\AddDynamicPartitions(); + $xfer += 
$this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_dynamic_partitions_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_add_dynamic_partitions_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchTxnException + */ + public $o1 = null; + /** + * @var \metastore\TxnAbortedException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchTxnException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\TxnAbortedException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_add_dynamic_partitions_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchTxnException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\TxnAbortedException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_dynamic_partitions_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_next_notification_args { + static $_TSPEC; + + /** + * @var \metastore\NotificationEventRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\NotificationEventRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_notification_args'; + } + + public 
function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\NotificationEventRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_notification_args'); + if ($this->rqst !== null) { + if (!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_next_notification_result { + static $_TSPEC; + + /** + * @var \metastore\NotificationEventResponse + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\NotificationEventResponse', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_notification_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\NotificationEventResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_notification_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_current_notificationEventId_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_current_notificationEventId_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += 
$input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_current_notificationEventId_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_current_notificationEventId_result { + static $_TSPEC; + + /** + * @var \metastore\CurrentNotificationEventId + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\CurrentNotificationEventId', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_current_notificationEventId_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\CurrentNotificationEventId(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_current_notificationEventId_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_fire_listener_event_args { + static $_TSPEC; + + /** + * @var \metastore\FireEventRequest + */ + public $rqst = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'rqst', + 'type' => TType::STRUCT, + 'class' => '\metastore\FireEventRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['rqst'])) { + $this->rqst = $vals['rqst']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_fire_listener_event_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->rqst = new \metastore\FireEventRequest(); + $xfer += $this->rqst->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_fire_listener_event_args'); + if ($this->rqst !== null) { + if 
(!is_object($this->rqst)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); + $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_fire_listener_event_result { + static $_TSPEC; + + /** + * @var \metastore\FireEventResponse + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\FireEventResponse', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_fire_listener_event_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\FireEventResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_fire_listener_event_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_flushCache_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_flushCache_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_flushCache_args'); $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -36864,33 +37980,19 @@ class ThriftHiveMetastore_compact_result { } -class ThriftHiveMetastore_show_compact_args { +class ThriftHiveMetastore_flushCache_result { static $_TSPEC; - /** - * @var \metastore\ShowCompactRequest - */ - public $rqst = null; - public function __construct($vals=null) { + public function __construct() { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'rqst', - 'type' => TType::STRUCT, - 'class' => '\metastore\ShowCompactRequest', - ), ); } - if (is_array($vals)) { - if 
(isset($vals['rqst'])) { - $this->rqst = $vals['rqst']; - } - } } public function getName() { - return 'ThriftHiveMetastore_show_compact_args'; + return 'ThriftHiveMetastore_flushCache_result'; } public function read($input) @@ -36908,14 +38010,6 @@ class ThriftHiveMetastore_show_compact_args { } switch ($fid) { - case 1: - if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\ShowCompactRequest(); - $xfer += $this->rqst->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -36928,15 +38022,7 @@ class ThriftHiveMetastore_show_compact_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_show_compact_args'); - if ($this->rqst !== null) { - if (!is_object($this->rqst)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); - $xfer += $this->rqst->write($output); - $xfer += $output->writeFieldEnd(); - } + $xfer += $output->writeStructBegin('ThriftHiveMetastore_flushCache_result'); $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -36944,33 +38030,33 @@ class ThriftHiveMetastore_show_compact_args { } -class ThriftHiveMetastore_show_compact_result { +class ThriftHiveMetastore_get_file_metadata_by_expr_args { static $_TSPEC; /** - * @var \metastore\ShowCompactResponse + * @var \metastore\GetFileMetadataByExprRequest */ - public $success = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 0 => array( - 'var' => 'success', + 1 => array( + 'var' => 'req', 'type' => TType::STRUCT, - 'class' => '\metastore\ShowCompactResponse', + 'class' => '\metastore\GetFileMetadataByExprRequest', ), ); } if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } public function getName() { - return 'ThriftHiveMetastore_show_compact_result'; + return 'ThriftHiveMetastore_get_file_metadata_by_expr_args'; } public function read($input) @@ -36988,10 +38074,10 @@ class ThriftHiveMetastore_show_compact_result { } switch ($fid) { - case 0: + case 1: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\ShowCompactResponse(); - $xfer += $this->success->read($input); + $this->req = new \metastore\GetFileMetadataByExprRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -37008,13 +38094,13 @@ class ThriftHiveMetastore_show_compact_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_show_compact_result'); - if ($this->success !== null) { - if (!is_object($this->success)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_file_metadata_by_expr_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); - $xfer += $this->success->write($output); + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37024,33 +38110,33 @@ class ThriftHiveMetastore_show_compact_result { } -class ThriftHiveMetastore_add_dynamic_partitions_args { +class 
ThriftHiveMetastore_get_file_metadata_by_expr_result { static $_TSPEC; /** - * @var \metastore\AddDynamicPartitions + * @var \metastore\GetFileMetadataByExprResult */ - public $rqst = null; + public $success = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'rqst', + 0 => array( + 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\AddDynamicPartitions', + 'class' => '\metastore\GetFileMetadataByExprResult', ), ); } if (is_array($vals)) { - if (isset($vals['rqst'])) { - $this->rqst = $vals['rqst']; + if (isset($vals['success'])) { + $this->success = $vals['success']; } } } public function getName() { - return 'ThriftHiveMetastore_add_dynamic_partitions_args'; + return 'ThriftHiveMetastore_get_file_metadata_by_expr_result'; } public function read($input) @@ -37068,10 +38154,10 @@ class ThriftHiveMetastore_add_dynamic_partitions_args { } switch ($fid) { - case 1: + case 0: if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\AddDynamicPartitions(); - $xfer += $this->rqst->read($input); + $this->success = new \metastore\GetFileMetadataByExprResult(); + $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); } @@ -37088,13 +38174,13 @@ class ThriftHiveMetastore_add_dynamic_partitions_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_dynamic_partitions_args'); - if ($this->rqst !== null) { - if (!is_object($this->rqst)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_file_metadata_by_expr_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); - $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37104,45 +38190,33 @@ class ThriftHiveMetastore_add_dynamic_partitions_args { } -class ThriftHiveMetastore_add_dynamic_partitions_result { +class ThriftHiveMetastore_get_file_metadata_args { static $_TSPEC; /** - * @var \metastore\NoSuchTxnException - */ - public $o1 = null; - /** - * @var \metastore\TxnAbortedException + * @var \metastore\GetFileMetadataRequest */ - public $o2 = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'o1', + 'var' => 'req', 'type' => TType::STRUCT, - 'class' => '\metastore\NoSuchTxnException', - ), - 2 => array( - 'var' => 'o2', - 'type' => TType::STRUCT, - 'class' => '\metastore\TxnAbortedException', + 'class' => '\metastore\GetFileMetadataRequest', ), ); } if (is_array($vals)) { - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - if (isset($vals['o2'])) { - $this->o2 = $vals['o2']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } public function getName() { - return 'ThriftHiveMetastore_add_dynamic_partitions_result'; + return 'ThriftHiveMetastore_get_file_metadata_args'; } public function read($input) @@ -37162,16 +38236,8 @@ class ThriftHiveMetastore_add_dynamic_partitions_result { { case 1: if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\NoSuchTxnException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRUCT) { - 
$this->o2 = new \metastore\TxnAbortedException(); - $xfer += $this->o2->read($input); + $this->req = new \metastore\GetFileMetadataRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -37188,15 +38254,13 @@ class ThriftHiveMetastore_add_dynamic_partitions_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_dynamic_partitions_result'); - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); - } - if ($this->o2 !== null) { - $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); - $xfer += $this->o2->write($output); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_file_metadata_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37206,33 +38270,33 @@ class ThriftHiveMetastore_add_dynamic_partitions_result { } -class ThriftHiveMetastore_get_next_notification_args { +class ThriftHiveMetastore_get_file_metadata_result { static $_TSPEC; /** - * @var \metastore\NotificationEventRequest + * @var \metastore\GetFileMetadataResult */ - public $rqst = null; + public $success = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'rqst', + 0 => array( + 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\NotificationEventRequest', + 'class' => '\metastore\GetFileMetadataResult', ), ); } if (is_array($vals)) { - if (isset($vals['rqst'])) { - $this->rqst = $vals['rqst']; + if (isset($vals['success'])) { + $this->success = $vals['success']; } } } public function getName() { - return 'ThriftHiveMetastore_get_next_notification_args'; + return 'ThriftHiveMetastore_get_file_metadata_result'; } public function read($input) @@ -37250,10 +38314,10 @@ class ThriftHiveMetastore_get_next_notification_args { } switch ($fid) { - case 1: + case 0: if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\NotificationEventRequest(); - $xfer += $this->rqst->read($input); + $this->success = new \metastore\GetFileMetadataResult(); + $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); } @@ -37270,13 +38334,13 @@ class ThriftHiveMetastore_get_next_notification_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_notification_args'); - if ($this->rqst !== null) { - if (!is_object($this->rqst)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_file_metadata_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); - $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37286,33 +38350,33 @@ class ThriftHiveMetastore_get_next_notification_args { } -class ThriftHiveMetastore_get_next_notification_result { +class ThriftHiveMetastore_put_file_metadata_args { static $_TSPEC; /** - * 
@var \metastore\NotificationEventResponse + * @var \metastore\PutFileMetadataRequest */ - public $success = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 0 => array( - 'var' => 'success', + 1 => array( + 'var' => 'req', 'type' => TType::STRUCT, - 'class' => '\metastore\NotificationEventResponse', + 'class' => '\metastore\PutFileMetadataRequest', ), ); } if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } public function getName() { - return 'ThriftHiveMetastore_get_next_notification_result'; + return 'ThriftHiveMetastore_put_file_metadata_args'; } public function read($input) @@ -37330,10 +38394,10 @@ class ThriftHiveMetastore_get_next_notification_result { } switch ($fid) { - case 0: + case 1: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\NotificationEventResponse(); - $xfer += $this->success->read($input); + $this->req = new \metastore\PutFileMetadataRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -37350,13 +38414,13 @@ class ThriftHiveMetastore_get_next_notification_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_notification_result'); - if ($this->success !== null) { - if (!is_object($this->success)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_put_file_metadata_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); - $xfer += $this->success->write($output); + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37366,61 +38430,11 @@ class ThriftHiveMetastore_get_next_notification_result { } -class ThriftHiveMetastore_get_current_notificationEventId_args { - static $_TSPEC; - - - public function __construct() { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - ); - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_current_notificationEventId_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_current_notificationEventId_args'); - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_current_notificationEventId_result { +class ThriftHiveMetastore_put_file_metadata_result { static $_TSPEC; /** - * @var \metastore\CurrentNotificationEventId + * @var \metastore\PutFileMetadataResult */ public $success = null; @@ -37430,7 +38444,7 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\CurrentNotificationEventId', + 'class' => '\metastore\PutFileMetadataResult', ), ); } @@ -37442,7 
+38456,7 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { } public function getName() { - return 'ThriftHiveMetastore_get_current_notificationEventId_result'; + return 'ThriftHiveMetastore_put_file_metadata_result'; } public function read($input) @@ -37462,7 +38476,7 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\CurrentNotificationEventId(); + $this->success = new \metastore\PutFileMetadataResult(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -37480,7 +38494,7 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_current_notificationEventId_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_put_file_metadata_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -37496,33 +38510,33 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { } -class ThriftHiveMetastore_fire_listener_event_args { +class ThriftHiveMetastore_clear_file_metadata_args { static $_TSPEC; /** - * @var \metastore\FireEventRequest + * @var \metastore\ClearFileMetadataRequest */ - public $rqst = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'rqst', + 'var' => 'req', 'type' => TType::STRUCT, - 'class' => '\metastore\FireEventRequest', + 'class' => '\metastore\ClearFileMetadataRequest', ), ); } if (is_array($vals)) { - if (isset($vals['rqst'])) { - $this->rqst = $vals['rqst']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } public function getName() { - return 'ThriftHiveMetastore_fire_listener_event_args'; + return 'ThriftHiveMetastore_clear_file_metadata_args'; } public function read($input) @@ -37542,8 +38556,8 @@ class ThriftHiveMetastore_fire_listener_event_args { { case 1: if ($ftype == TType::STRUCT) { - $this->rqst = new \metastore\FireEventRequest(); - $xfer += $this->rqst->read($input); + $this->req = new \metastore\ClearFileMetadataRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -37560,13 +38574,13 @@ class ThriftHiveMetastore_fire_listener_event_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_fire_listener_event_args'); - if ($this->rqst !== null) { - if (!is_object($this->rqst)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_clear_file_metadata_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('rqst', TType::STRUCT, 1); - $xfer += $this->rqst->write($output); + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -37576,11 +38590,11 @@ class ThriftHiveMetastore_fire_listener_event_args { } -class ThriftHiveMetastore_fire_listener_event_result { +class ThriftHiveMetastore_clear_file_metadata_result { static $_TSPEC; /** - * @var \metastore\FireEventResponse + * @var \metastore\ClearFileMetadataResult */ public $success = null; @@ -37590,7 +38604,7 @@ class ThriftHiveMetastore_fire_listener_event_result 
{ 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\FireEventResponse', + 'class' => '\metastore\ClearFileMetadataResult', ), ); } @@ -37602,7 +38616,7 @@ class ThriftHiveMetastore_fire_listener_event_result { } public function getName() { - return 'ThriftHiveMetastore_fire_listener_event_result'; + return 'ThriftHiveMetastore_clear_file_metadata_result'; } public function read($input) @@ -37622,7 +38636,7 @@ class ThriftHiveMetastore_fire_listener_event_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\FireEventResponse(); + $this->success = new \metastore\ClearFileMetadataResult(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -37640,7 +38654,7 @@ class ThriftHiveMetastore_fire_listener_event_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_fire_listener_event_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_clear_file_metadata_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 949a6e9..0baeef3 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -13861,6 +13861,995 @@ class FireEventResponse { } +class MetadataPpdResult { + static $_TSPEC; + + /** + * @var string + */ + public $metadata = null; + /** + * @var string + */ + public $includeBitset = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'metadata', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'includeBitset', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + if (isset($vals['includeBitset'])) { + $this->includeBitset = $vals['includeBitset']; + } + } + } + + public function getName() { + return 'MetadataPpdResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->metadata); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->includeBitset); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('MetadataPpdResult'); + if ($this->metadata !== null) { + $xfer += $output->writeFieldBegin('metadata', TType::STRING, 1); + $xfer += $output->writeString($this->metadata); + $xfer += $output->writeFieldEnd(); + } + if ($this->includeBitset !== null) { + $xfer += $output->writeFieldBegin('includeBitset', TType::STRING, 2); + $xfer += $output->writeString($this->includeBitset); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class 
GetFileMetadataByExprResult { + static $_TSPEC; + + /** + * @var array + */ + public $metadata = null; + /** + * @var bool + */ + public $isSupported = null; + /** + * @var int[] + */ + public $unknownFileIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'metadata', + 'type' => TType::MAP, + 'ktype' => TType::I64, + 'vtype' => TType::STRUCT, + 'key' => array( + 'type' => TType::I64, + ), + 'val' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\MetadataPpdResult', + ), + ), + 2 => array( + 'var' => 'isSupported', + 'type' => TType::BOOL, + ), + 3 => array( + 'var' => 'unknownFileIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + if (isset($vals['isSupported'])) { + $this->isSupported = $vals['isSupported']; + } + if (isset($vals['unknownFileIds'])) { + $this->unknownFileIds = $vals['unknownFileIds']; + } + } + } + + public function getName() { + return 'GetFileMetadataByExprResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::MAP) { + $this->metadata = array(); + $_size465 = 0; + $_ktype466 = 0; + $_vtype467 = 0; + $xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465); + for ($_i469 = 0; $_i469 < $_size465; ++$_i469) + { + $key470 = 0; + $val471 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key470); + $val471 = new \metastore\MetadataPpdResult(); + $xfer += $val471->read($input); + $this->metadata[$key470] = $val471; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isSupported); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->unknownFileIds = array(); + $_size472 = 0; + $_etype475 = 0; + $xfer += $input->readListBegin($_etype475, $_size472); + for ($_i476 = 0; $_i476 < $_size472; ++$_i476) + { + $elem477 = null; + $xfer += $input->readI64($elem477); + $this->unknownFileIds []= $elem477; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetFileMetadataByExprResult'); + if ($this->metadata !== null) { + if (!is_array($this->metadata)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('metadata', TType::MAP, 1); + { + $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); + { + foreach ($this->metadata as $kiter478 => $viter479) + { + $xfer += $output->writeI64($kiter478); + $xfer += $viter479->write($output); + } + } + $output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->isSupported !== null) { + $xfer += $output->writeFieldBegin('isSupported', TType::BOOL, 2); + $xfer += $output->writeBool($this->isSupported); + $xfer += 
$output->writeFieldEnd(); + } + if ($this->unknownFileIds !== null) { + if (!is_array($this->unknownFileIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('unknownFileIds', TType::LST, 3); + { + $output->writeListBegin(TType::I64, count($this->unknownFileIds)); + { + foreach ($this->unknownFileIds as $iter480) + { + $xfer += $output->writeI64($iter480); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetFileMetadataByExprRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $fileIds = null; + /** + * @var string + */ + public $expr = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fileIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 2 => array( + 'var' => 'expr', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fileIds'])) { + $this->fileIds = $vals['fileIds']; + } + if (isset($vals['expr'])) { + $this->expr = $vals['expr']; + } + } + } + + public function getName() { + return 'GetFileMetadataByExprRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->fileIds = array(); + $_size481 = 0; + $_etype484 = 0; + $xfer += $input->readListBegin($_etype484, $_size481); + for ($_i485 = 0; $_i485 < $_size481; ++$_i485) + { + $elem486 = null; + $xfer += $input->readI64($elem486); + $this->fileIds []= $elem486; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->expr); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetFileMetadataByExprRequest'); + if ($this->fileIds !== null) { + if (!is_array($this->fileIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('fileIds', TType::LST, 1); + { + $output->writeListBegin(TType::I64, count($this->fileIds)); + { + foreach ($this->fileIds as $iter487) + { + $xfer += $output->writeI64($iter487); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->expr !== null) { + $xfer += $output->writeFieldBegin('expr', TType::STRING, 2); + $xfer += $output->writeString($this->expr); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetFileMetadataResult { + static $_TSPEC; + + /** + * @var array + */ + public $metadata = null; + /** + * @var bool + */ + public $isSupported = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'metadata', + 'type' => TType::MAP, + 'ktype' => TType::I64, + 'vtype' => 
TType::STRING, + 'key' => array( + 'type' => TType::I64, + ), + 'val' => array( + 'type' => TType::STRING, + ), + ), + 2 => array( + 'var' => 'isSupported', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + if (isset($vals['isSupported'])) { + $this->isSupported = $vals['isSupported']; + } + } + } + + public function getName() { + return 'GetFileMetadataResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::MAP) { + $this->metadata = array(); + $_size488 = 0; + $_ktype489 = 0; + $_vtype490 = 0; + $xfer += $input->readMapBegin($_ktype489, $_vtype490, $_size488); + for ($_i492 = 0; $_i492 < $_size488; ++$_i492) + { + $key493 = 0; + $val494 = ''; + $xfer += $input->readI64($key493); + $xfer += $input->readString($val494); + $this->metadata[$key493] = $val494; + } + $xfer += $input->readMapEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->isSupported); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetFileMetadataResult'); + if ($this->metadata !== null) { + if (!is_array($this->metadata)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('metadata', TType::MAP, 1); + { + $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); + { + foreach ($this->metadata as $kiter495 => $viter496) + { + $xfer += $output->writeI64($kiter495); + $xfer += $output->writeString($viter496); + } + } + $output->writeMapEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->isSupported !== null) { + $xfer += $output->writeFieldBegin('isSupported', TType::BOOL, 2); + $xfer += $output->writeBool($this->isSupported); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetFileMetadataRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $fileIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fileIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fileIds'])) { + $this->fileIds = $vals['fileIds']; + } + } + } + + public function getName() { + return 'GetFileMetadataRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->fileIds = array(); + $_size497 = 0; + $_etype500 = 0; + $xfer += $input->readListBegin($_etype500, $_size497); + for ($_i501 = 0; $_i501 < $_size497; ++$_i501) + { + $elem502 = null; + $xfer += $input->readI64($elem502); + 
$this->fileIds []= $elem502; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetFileMetadataRequest'); + if ($this->fileIds !== null) { + if (!is_array($this->fileIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('fileIds', TType::LST, 1); + { + $output->writeListBegin(TType::I64, count($this->fileIds)); + { + foreach ($this->fileIds as $iter503) + { + $xfer += $output->writeI64($iter503); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class PutFileMetadataResult { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'PutFileMetadataResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('PutFileMetadataResult'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class PutFileMetadataRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $fileIds = null; + /** + * @var string[] + */ + public $metadata = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fileIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + 2 => array( + 'var' => 'metadata', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fileIds'])) { + $this->fileIds = $vals['fileIds']; + } + if (isset($vals['metadata'])) { + $this->metadata = $vals['metadata']; + } + } + } + + public function getName() { + return 'PutFileMetadataRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->fileIds = array(); + $_size504 = 0; + $_etype507 = 0; + $xfer += $input->readListBegin($_etype507, $_size504); + for ($_i508 = 0; $_i508 < $_size504; ++$_i508) + { + $elem509 = null; + $xfer += $input->readI64($elem509); + $this->fileIds []= $elem509; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::LST) { + $this->metadata = array(); + $_size510 = 0; + $_etype513 = 0; + $xfer += $input->readListBegin($_etype513, $_size510); + for ($_i514 = 0; $_i514 < $_size510; ++$_i514) + { + $elem515 = null; + $xfer += 
$input->readString($elem515); + $this->metadata []= $elem515; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('PutFileMetadataRequest'); + if ($this->fileIds !== null) { + if (!is_array($this->fileIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('fileIds', TType::LST, 1); + { + $output->writeListBegin(TType::I64, count($this->fileIds)); + { + foreach ($this->fileIds as $iter516) + { + $xfer += $output->writeI64($iter516); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->metadata !== null) { + if (!is_array($this->metadata)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('metadata', TType::LST, 2); + { + $output->writeListBegin(TType::STRING, count($this->metadata)); + { + foreach ($this->metadata as $iter517) + { + $xfer += $output->writeString($iter517); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ClearFileMetadataResult { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ClearFileMetadataResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ClearFileMetadataResult'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ClearFileMetadataRequest { + static $_TSPEC; + + /** + * @var int[] + */ + public $fileIds = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fileIds', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fileIds'])) { + $this->fileIds = $vals['fileIds']; + } + } + } + + public function getName() { + return 'ClearFileMetadataRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->fileIds = array(); + $_size518 = 0; + $_etype521 = 0; + $xfer += $input->readListBegin($_etype521, $_size518); + for ($_i522 = 0; $_i522 < $_size518; ++$_i522) + { + $elem523 = null; + $xfer += $input->readI64($elem523); + $this->fileIds []= $elem523; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + 
$xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ClearFileMetadataRequest'); + if ($this->fileIds !== null) { + if (!is_array($this->fileIds)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('fileIds', TType::LST, 1); + { + $output->writeListBegin(TType::I64, count($this->fileIds)); + { + foreach ($this->fileIds as $iter524) + { + $xfer += $output->writeI64($iter524); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class GetAllFunctionsResponse { static $_TSPEC; @@ -13912,15 +14901,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size465 = 0; - $_etype468 = 0; - $xfer += $input->readListBegin($_etype468, $_size465); - for ($_i469 = 0; $_i469 < $_size465; ++$_i469) + $_size525 = 0; + $_etype528 = 0; + $xfer += $input->readListBegin($_etype528, $_size525); + for ($_i529 = 0; $_i529 < $_size525; ++$_i529) { - $elem470 = null; - $elem470 = new \metastore\Function(); - $xfer += $elem470->read($input); - $this->functions []= $elem470; + $elem530 = null; + $elem530 = new \metastore\Function(); + $xfer += $elem530->read($input); + $this->functions []= $elem530; } $xfer += $input->readListEnd(); } else { @@ -13948,9 +14937,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter471) + foreach ($this->functions as $iter531) { - $xfer += $iter471->write($output); + $xfer += $iter531->write($output); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index dc348ef..466063e 100755 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -148,6 +148,11 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' NotificationEventResponse get_next_notification(NotificationEventRequest rqst)') print(' CurrentNotificationEventId get_current_notificationEventId()') print(' FireEventResponse fire_listener_event(FireEventRequest rqst)') + print(' void flushCache()') + print(' GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req)') + print(' GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req)') + print(' PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)') + print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -157,8 +162,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void setOption(string key, string value)') print(' string getOption(string key)') print(' getOptions()') + print(' string getCpuProfile(i32 profileDurationInSec)') print(' i64 aliveSince()') - print(' reflection_limited.Service getLimitedReflection()') print(' void reinitialize()') print(' void shutdown()') print('') @@ -961,6 +966,36 @@ elif cmd == 'fire_listener_event': sys.exit(1) pp.pprint(client.fire_listener_event(eval(args[0]),)) +elif cmd == 'flushCache': + if 
len(args) != 0: + print('flushCache requires 0 args') + sys.exit(1) + pp.pprint(client.flushCache()) + +elif cmd == 'get_file_metadata_by_expr': + if len(args) != 1: + print('get_file_metadata_by_expr requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata_by_expr(eval(args[0]),)) + +elif cmd == 'get_file_metadata': + if len(args) != 1: + print('get_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata(eval(args[0]),)) + +elif cmd == 'put_file_metadata': + if len(args) != 1: + print('put_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.put_file_metadata(eval(args[0]),)) + +elif cmd == 'clear_file_metadata': + if len(args) != 1: + print('clear_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.clear_file_metadata(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') @@ -1015,18 +1050,18 @@ elif cmd == 'getOptions': sys.exit(1) pp.pprint(client.getOptions()) +elif cmd == 'getCpuProfile': + if len(args) != 1: + print('getCpuProfile requires 1 args') + sys.exit(1) + pp.pprint(client.getCpuProfile(eval(args[0]),)) + elif cmd == 'aliveSince': if len(args) != 0: print('aliveSince requires 0 args') sys.exit(1) pp.pprint(client.aliveSince()) -elif cmd == 'getLimitedReflection': - if len(args) != 0: - print('getLimitedReflection requires 0 args') - sys.exit(1) - pp.pprint(client.getLimitedReflection()) - elif cmd == 'reinitialize': if len(args) != 0: print('reinitialize requires 0 args') diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 9e460f0..f89320f 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1021,6 +1021,37 @@ def fire_listener_event(self, rqst): """ pass + def flushCache(self): + pass + + def get_file_metadata_by_expr(self, req): + """ + Parameters: + - req + """ + pass + + def get_file_metadata(self, req): + """ + Parameters: + - req + """ + pass + + def put_file_metadata(self, req): + """ + Parameters: + - req + """ + pass + + def clear_file_metadata(self, req): + """ + Parameters: + - req + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -5589,6 +5620,154 @@ def recv_fire_listener_event(self): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "fire_listener_event failed: unknown result"); + def flushCache(self): + self.send_flushCache() + self.recv_flushCache() + + def send_flushCache(self): + self._oprot.writeMessageBegin('flushCache', TMessageType.CALL, self._seqid) + args = flushCache_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_flushCache(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = flushCache_result() + result.read(iprot) + iprot.readMessageEnd() + return + + def get_file_metadata_by_expr(self, req): + """ + Parameters: + - req + """ + self.send_get_file_metadata_by_expr(req) + return self.recv_get_file_metadata_by_expr() + + def send_get_file_metadata_by_expr(self, req): + self._oprot.writeMessageBegin('get_file_metadata_by_expr', TMessageType.CALL, self._seqid) + args = get_file_metadata_by_expr_args() + args.req = req + args.write(self._oprot) + 
self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_file_metadata_by_expr(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_file_metadata_by_expr_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + + def get_file_metadata(self, req): + """ + Parameters: + - req + """ + self.send_get_file_metadata(req) + return self.recv_get_file_metadata() + + def send_get_file_metadata(self, req): + self._oprot.writeMessageBegin('get_file_metadata', TMessageType.CALL, self._seqid) + args = get_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_file_metadata failed: unknown result"); + + def put_file_metadata(self, req): + """ + Parameters: + - req + """ + self.send_put_file_metadata(req) + return self.recv_put_file_metadata() + + def send_put_file_metadata(self, req): + self._oprot.writeMessageBegin('put_file_metadata', TMessageType.CALL, self._seqid) + args = put_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_put_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = put_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "put_file_metadata failed: unknown result"); + + def clear_file_metadata(self, req): + """ + Parameters: + - req + """ + self.send_clear_file_metadata(req) + return self.recv_clear_file_metadata() + + def send_clear_file_metadata(self, req): + self._oprot.writeMessageBegin('clear_file_metadata', TMessageType.CALL, self._seqid) + args = clear_file_metadata_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_clear_file_metadata(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = clear_file_metadata_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "clear_file_metadata failed: unknown result"); + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -5717,6 +5896,11 @@ def __init__(self, handler): self._processMap["get_next_notification"] = Processor.process_get_next_notification 
self._processMap["get_current_notificationEventId"] = Processor.process_get_current_notificationEventId self._processMap["fire_listener_event"] = Processor.process_fire_listener_event + self._processMap["flushCache"] = Processor.process_flushCache + self._processMap["get_file_metadata_by_expr"] = Processor.process_get_file_metadata_by_expr + self._processMap["get_file_metadata"] = Processor.process_get_file_metadata + self._processMap["put_file_metadata"] = Processor.process_put_file_metadata + self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -7701,6 +7885,61 @@ def process_fire_listener_event(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_flushCache(self, seqid, iprot, oprot): + args = flushCache_args() + args.read(iprot) + iprot.readMessageEnd() + result = flushCache_result() + self._handler.flushCache() + oprot.writeMessageBegin("flushCache", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_file_metadata_by_expr(self, seqid, iprot, oprot): + args = get_file_metadata_by_expr_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_file_metadata_by_expr_result() + result.success = self._handler.get_file_metadata_by_expr(args.req) + oprot.writeMessageBegin("get_file_metadata_by_expr", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_file_metadata(self, seqid, iprot, oprot): + args = get_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_file_metadata_result() + result.success = self._handler.get_file_metadata(args.req) + oprot.writeMessageBegin("get_file_metadata", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_put_file_metadata(self, seqid, iprot, oprot): + args = put_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = put_file_metadata_result() + result.success = self._handler.put_file_metadata(args.req) + oprot.writeMessageBegin("put_file_metadata", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_clear_file_metadata(self, seqid, iprot, oprot): + args = clear_file_metadata_args() + args.read(iprot) + iprot.readMessageEnd() + result = clear_file_metadata_result() + result.success = self._handler.clear_file_metadata(args.req) + oprot.writeMessageBegin("clear_file_metadata", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -8587,10 +8826,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype472, _size469) = iprot.readListBegin() - for _i473 in xrange(_size469): - _elem474 = iprot.readString(); - self.success.append(_elem474) + (_etype532, _size529) = iprot.readListBegin() + for _i533 in xrange(_size529): + _elem534 = iprot.readString(); + self.success.append(_elem534) iprot.readListEnd() else: iprot.skip(ftype) @@ -8613,8 +8852,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter475 in self.success: - oprot.writeString(iter475) + for iter535 in self.success: + oprot.writeString(iter535) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -8719,10 +8958,10 @@ 
def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype479, _size476) = iprot.readListBegin() - for _i480 in xrange(_size476): - _elem481 = iprot.readString(); - self.success.append(_elem481) + (_etype539, _size536) = iprot.readListBegin() + for _i540 in xrange(_size536): + _elem541 = iprot.readString(); + self.success.append(_elem541) iprot.readListEnd() else: iprot.skip(ftype) @@ -8745,8 +8984,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter482 in self.success: - oprot.writeString(iter482) + for iter542 in self.success: + oprot.writeString(iter542) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9516,12 +9755,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype484, _vtype485, _size483 ) = iprot.readMapBegin() - for _i487 in xrange(_size483): - _key488 = iprot.readString(); - _val489 = Type() - _val489.read(iprot) - self.success[_key488] = _val489 + (_ktype544, _vtype545, _size543 ) = iprot.readMapBegin() + for _i547 in xrange(_size543): + _key548 = iprot.readString(); + _val549 = Type() + _val549.read(iprot) + self.success[_key548] = _val549 iprot.readMapEnd() else: iprot.skip(ftype) @@ -9544,9 +9783,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter490,viter491 in self.success.items(): - oprot.writeString(kiter490) - viter491.write(oprot) + for kiter550,viter551 in self.success.items(): + oprot.writeString(kiter550) + viter551.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -9689,11 +9928,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype495, _size492) = iprot.readListBegin() - for _i496 in xrange(_size492): - _elem497 = FieldSchema() - _elem497.read(iprot) - self.success.append(_elem497) + (_etype555, _size552) = iprot.readListBegin() + for _i556 in xrange(_size552): + _elem557 = FieldSchema() + _elem557.read(iprot) + self.success.append(_elem557) iprot.readListEnd() else: iprot.skip(ftype) @@ -9728,8 +9967,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter498 in self.success: - iter498.write(oprot) + for iter558 in self.success: + iter558.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9896,11 +10135,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype502, _size499) = iprot.readListBegin() - for _i503 in xrange(_size499): - _elem504 = FieldSchema() - _elem504.read(iprot) - self.success.append(_elem504) + (_etype562, _size559) = iprot.readListBegin() + for _i563 in xrange(_size559): + _elem564 = FieldSchema() + _elem564.read(iprot) + self.success.append(_elem564) iprot.readListEnd() else: iprot.skip(ftype) @@ -9935,8 +10174,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter505 in self.success: - iter505.write(oprot) + for iter565 in self.success: + iter565.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10089,11 +10328,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype509, 
_size506) = iprot.readListBegin() - for _i510 in xrange(_size506): - _elem511 = FieldSchema() - _elem511.read(iprot) - self.success.append(_elem511) + (_etype569, _size566) = iprot.readListBegin() + for _i570 in xrange(_size566): + _elem571 = FieldSchema() + _elem571.read(iprot) + self.success.append(_elem571) iprot.readListEnd() else: iprot.skip(ftype) @@ -10128,8 +10367,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter512 in self.success: - iter512.write(oprot) + for iter572 in self.success: + iter572.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10296,11 +10535,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype516, _size513) = iprot.readListBegin() - for _i517 in xrange(_size513): - _elem518 = FieldSchema() - _elem518.read(iprot) - self.success.append(_elem518) + (_etype576, _size573) = iprot.readListBegin() + for _i577 in xrange(_size573): + _elem578 = FieldSchema() + _elem578.read(iprot) + self.success.append(_elem578) iprot.readListEnd() else: iprot.skip(ftype) @@ -10335,8 +10574,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter519 in self.success: - iter519.write(oprot) + for iter579 in self.success: + iter579.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11201,10 +11440,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype523, _size520) = iprot.readListBegin() - for _i524 in xrange(_size520): - _elem525 = iprot.readString(); - self.success.append(_elem525) + (_etype583, _size580) = iprot.readListBegin() + for _i584 in xrange(_size580): + _elem585 = iprot.readString(); + self.success.append(_elem585) iprot.readListEnd() else: iprot.skip(ftype) @@ -11227,8 +11466,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter526 in self.success: - oprot.writeString(iter526) + for iter586 in self.success: + oprot.writeString(iter586) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11352,10 +11591,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype530, _size527) = iprot.readListBegin() - for _i531 in xrange(_size527): - _elem532 = iprot.readString(); - self.success.append(_elem532) + (_etype590, _size587) = iprot.readListBegin() + for _i591 in xrange(_size587): + _elem592 = iprot.readString(); + self.success.append(_elem592) iprot.readListEnd() else: iprot.skip(ftype) @@ -11378,8 +11617,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter533 in self.success: - oprot.writeString(iter533) + for iter593 in self.success: + oprot.writeString(iter593) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11615,10 +11854,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype537, _size534) = iprot.readListBegin() - for _i538 in xrange(_size534): - _elem539 = iprot.readString(); - self.tbl_names.append(_elem539) + (_etype597, _size594) = iprot.readListBegin() + for _i598 in xrange(_size594): + _elem599 = iprot.readString(); + self.tbl_names.append(_elem599) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -11639,8 +11878,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter540 in self.tbl_names: - oprot.writeString(iter540) + for iter600 in self.tbl_names: + oprot.writeString(iter600) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11701,11 +11940,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype544, _size541) = iprot.readListBegin() - for _i545 in xrange(_size541): - _elem546 = Table() - _elem546.read(iprot) - self.success.append(_elem546) + (_etype604, _size601) = iprot.readListBegin() + for _i605 in xrange(_size601): + _elem606 = Table() + _elem606.read(iprot) + self.success.append(_elem606) iprot.readListEnd() else: iprot.skip(ftype) @@ -11740,8 +11979,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter547 in self.success: - iter547.write(oprot) + for iter607 in self.success: + iter607.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11907,10 +12146,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype551, _size548) = iprot.readListBegin() - for _i552 in xrange(_size548): - _elem553 = iprot.readString(); - self.success.append(_elem553) + (_etype611, _size608) = iprot.readListBegin() + for _i612 in xrange(_size608): + _elem613 = iprot.readString(); + self.success.append(_elem613) iprot.readListEnd() else: iprot.skip(ftype) @@ -11945,8 +12184,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter554 in self.success: - oprot.writeString(iter554) + for iter614 in self.success: + oprot.writeString(iter614) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12916,11 +13155,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype558, _size555) = iprot.readListBegin() - for _i559 in xrange(_size555): - _elem560 = Partition() - _elem560.read(iprot) - self.new_parts.append(_elem560) + (_etype618, _size615) = iprot.readListBegin() + for _i619 in xrange(_size615): + _elem620 = Partition() + _elem620.read(iprot) + self.new_parts.append(_elem620) iprot.readListEnd() else: iprot.skip(ftype) @@ -12937,8 +13176,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter561 in self.new_parts: - iter561.write(oprot) + for iter621 in self.new_parts: + iter621.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13096,11 +13335,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype565, _size562) = iprot.readListBegin() - for _i566 in xrange(_size562): - _elem567 = PartitionSpec() - _elem567.read(iprot) - self.new_parts.append(_elem567) + (_etype625, _size622) = iprot.readListBegin() + for _i626 in xrange(_size622): + _elem627 = PartitionSpec() + _elem627.read(iprot) + self.new_parts.append(_elem627) iprot.readListEnd() else: iprot.skip(ftype) @@ -13117,8 +13356,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for 
iter568 in self.new_parts: - iter568.write(oprot) + for iter628 in self.new_parts: + iter628.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13292,10 +13531,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype572, _size569) = iprot.readListBegin() - for _i573 in xrange(_size569): - _elem574 = iprot.readString(); - self.part_vals.append(_elem574) + (_etype632, _size629) = iprot.readListBegin() + for _i633 in xrange(_size629): + _elem634 = iprot.readString(); + self.part_vals.append(_elem634) iprot.readListEnd() else: iprot.skip(ftype) @@ -13320,8 +13559,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter575 in self.part_vals: - oprot.writeString(iter575) + for iter635 in self.part_vals: + oprot.writeString(iter635) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13674,10 +13913,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype579, _size576) = iprot.readListBegin() - for _i580 in xrange(_size576): - _elem581 = iprot.readString(); - self.part_vals.append(_elem581) + (_etype639, _size636) = iprot.readListBegin() + for _i640 in xrange(_size636): + _elem641 = iprot.readString(); + self.part_vals.append(_elem641) iprot.readListEnd() else: iprot.skip(ftype) @@ -13708,8 +13947,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter582 in self.part_vals: - oprot.writeString(iter582) + for iter642 in self.part_vals: + oprot.writeString(iter642) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -14304,10 +14543,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype586, _size583) = iprot.readListBegin() - for _i587 in xrange(_size583): - _elem588 = iprot.readString(); - self.part_vals.append(_elem588) + (_etype646, _size643) = iprot.readListBegin() + for _i647 in xrange(_size643): + _elem648 = iprot.readString(); + self.part_vals.append(_elem648) iprot.readListEnd() else: iprot.skip(ftype) @@ -14337,8 +14576,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter589 in self.part_vals: - oprot.writeString(iter589) + for iter649 in self.part_vals: + oprot.writeString(iter649) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -14511,10 +14750,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype593, _size590) = iprot.readListBegin() - for _i594 in xrange(_size590): - _elem595 = iprot.readString(); - self.part_vals.append(_elem595) + (_etype653, _size650) = iprot.readListBegin() + for _i654 in xrange(_size650): + _elem655 = iprot.readString(); + self.part_vals.append(_elem655) iprot.readListEnd() else: iprot.skip(ftype) @@ -14550,8 +14789,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter596 in self.part_vals: - oprot.writeString(iter596) + for iter656 in self.part_vals: + oprot.writeString(iter656) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -15288,10 +15527,10 @@ def read(self, iprot): elif 
fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype600, _size597) = iprot.readListBegin() - for _i601 in xrange(_size597): - _elem602 = iprot.readString(); - self.part_vals.append(_elem602) + (_etype660, _size657) = iprot.readListBegin() + for _i661 in xrange(_size657): + _elem662 = iprot.readString(); + self.part_vals.append(_elem662) iprot.readListEnd() else: iprot.skip(ftype) @@ -15316,8 +15555,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter603 in self.part_vals: - oprot.writeString(iter603) + for iter663 in self.part_vals: + oprot.writeString(iter663) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15476,11 +15715,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype605, _vtype606, _size604 ) = iprot.readMapBegin() - for _i608 in xrange(_size604): - _key609 = iprot.readString(); - _val610 = iprot.readString(); - self.partitionSpecs[_key609] = _val610 + (_ktype665, _vtype666, _size664 ) = iprot.readMapBegin() + for _i668 in xrange(_size664): + _key669 = iprot.readString(); + _val670 = iprot.readString(); + self.partitionSpecs[_key669] = _val670 iprot.readMapEnd() else: iprot.skip(ftype) @@ -15517,9 +15756,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter611,viter612 in self.partitionSpecs.items(): - oprot.writeString(kiter611) - oprot.writeString(viter612) + for kiter671,viter672 in self.partitionSpecs.items(): + oprot.writeString(kiter671) + oprot.writeString(viter672) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -15734,10 +15973,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype616, _size613) = iprot.readListBegin() - for _i617 in xrange(_size613): - _elem618 = iprot.readString(); - self.part_vals.append(_elem618) + (_etype676, _size673) = iprot.readListBegin() + for _i677 in xrange(_size673): + _elem678 = iprot.readString(); + self.part_vals.append(_elem678) iprot.readListEnd() else: iprot.skip(ftype) @@ -15749,10 +15988,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype622, _size619) = iprot.readListBegin() - for _i623 in xrange(_size619): - _elem624 = iprot.readString(); - self.group_names.append(_elem624) + (_etype682, _size679) = iprot.readListBegin() + for _i683 in xrange(_size679): + _elem684 = iprot.readString(); + self.group_names.append(_elem684) iprot.readListEnd() else: iprot.skip(ftype) @@ -15777,8 +16016,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter625 in self.part_vals: - oprot.writeString(iter625) + for iter685 in self.part_vals: + oprot.writeString(iter685) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -15788,8 +16027,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter626 in self.group_names: - oprot.writeString(iter626) + for iter686 in self.group_names: + oprot.writeString(iter686) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16218,11 +16457,11 @@ def read(self, iprot): if 
fid == 0: if ftype == TType.LIST: self.success = [] - (_etype630, _size627) = iprot.readListBegin() - for _i631 in xrange(_size627): - _elem632 = Partition() - _elem632.read(iprot) - self.success.append(_elem632) + (_etype690, _size687) = iprot.readListBegin() + for _i691 in xrange(_size687): + _elem692 = Partition() + _elem692.read(iprot) + self.success.append(_elem692) iprot.readListEnd() else: iprot.skip(ftype) @@ -16251,8 +16490,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter633 in self.success: - iter633.write(oprot) + for iter693 in self.success: + iter693.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16346,10 +16585,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype637, _size634) = iprot.readListBegin() - for _i638 in xrange(_size634): - _elem639 = iprot.readString(); - self.group_names.append(_elem639) + (_etype697, _size694) = iprot.readListBegin() + for _i698 in xrange(_size694): + _elem699 = iprot.readString(); + self.group_names.append(_elem699) iprot.readListEnd() else: iprot.skip(ftype) @@ -16382,8 +16621,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter640 in self.group_names: - oprot.writeString(iter640) + for iter700 in self.group_names: + oprot.writeString(iter700) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16444,11 +16683,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype644, _size641) = iprot.readListBegin() - for _i645 in xrange(_size641): - _elem646 = Partition() - _elem646.read(iprot) - self.success.append(_elem646) + (_etype704, _size701) = iprot.readListBegin() + for _i705 in xrange(_size701): + _elem706 = Partition() + _elem706.read(iprot) + self.success.append(_elem706) iprot.readListEnd() else: iprot.skip(ftype) @@ -16477,8 +16716,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter647 in self.success: - iter647.write(oprot) + for iter707 in self.success: + iter707.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16636,11 +16875,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype651, _size648) = iprot.readListBegin() - for _i652 in xrange(_size648): - _elem653 = PartitionSpec() - _elem653.read(iprot) - self.success.append(_elem653) + (_etype711, _size708) = iprot.readListBegin() + for _i712 in xrange(_size708): + _elem713 = PartitionSpec() + _elem713.read(iprot) + self.success.append(_elem713) iprot.readListEnd() else: iprot.skip(ftype) @@ -16669,8 +16908,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter654 in self.success: - iter654.write(oprot) + for iter714 in self.success: + iter714.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16825,10 +17064,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype658, _size655) = iprot.readListBegin() - for _i659 in xrange(_size655): - _elem660 = iprot.readString(); - self.success.append(_elem660) + (_etype718, _size715) = 
iprot.readListBegin() + for _i719 in xrange(_size715): + _elem720 = iprot.readString(); + self.success.append(_elem720) iprot.readListEnd() else: iprot.skip(ftype) @@ -16851,8 +17090,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter661 in self.success: - oprot.writeString(iter661) + for iter721 in self.success: + oprot.writeString(iter721) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -16928,10 +17167,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype665, _size662) = iprot.readListBegin() - for _i666 in xrange(_size662): - _elem667 = iprot.readString(); - self.part_vals.append(_elem667) + (_etype725, _size722) = iprot.readListBegin() + for _i726 in xrange(_size722): + _elem727 = iprot.readString(); + self.part_vals.append(_elem727) iprot.readListEnd() else: iprot.skip(ftype) @@ -16961,8 +17200,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter668 in self.part_vals: - oprot.writeString(iter668) + for iter728 in self.part_vals: + oprot.writeString(iter728) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -17026,11 +17265,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype672, _size669) = iprot.readListBegin() - for _i673 in xrange(_size669): - _elem674 = Partition() - _elem674.read(iprot) - self.success.append(_elem674) + (_etype732, _size729) = iprot.readListBegin() + for _i733 in xrange(_size729): + _elem734 = Partition() + _elem734.read(iprot) + self.success.append(_elem734) iprot.readListEnd() else: iprot.skip(ftype) @@ -17059,8 +17298,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter675 in self.success: - iter675.write(oprot) + for iter735 in self.success: + iter735.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17147,10 +17386,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype679, _size676) = iprot.readListBegin() - for _i680 in xrange(_size676): - _elem681 = iprot.readString(); - self.part_vals.append(_elem681) + (_etype739, _size736) = iprot.readListBegin() + for _i740 in xrange(_size736): + _elem741 = iprot.readString(); + self.part_vals.append(_elem741) iprot.readListEnd() else: iprot.skip(ftype) @@ -17167,10 +17406,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype685, _size682) = iprot.readListBegin() - for _i686 in xrange(_size682): - _elem687 = iprot.readString(); - self.group_names.append(_elem687) + (_etype745, _size742) = iprot.readListBegin() + for _i746 in xrange(_size742): + _elem747 = iprot.readString(); + self.group_names.append(_elem747) iprot.readListEnd() else: iprot.skip(ftype) @@ -17195,8 +17434,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter688 in self.part_vals: - oprot.writeString(iter688) + for iter748 in self.part_vals: + oprot.writeString(iter748) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -17210,8 +17449,8 @@ def write(self, oprot): if self.group_names is not None: 
oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter689 in self.group_names: - oprot.writeString(iter689) + for iter749 in self.group_names: + oprot.writeString(iter749) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17273,11 +17512,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype693, _size690) = iprot.readListBegin() - for _i694 in xrange(_size690): - _elem695 = Partition() - _elem695.read(iprot) - self.success.append(_elem695) + (_etype753, _size750) = iprot.readListBegin() + for _i754 in xrange(_size750): + _elem755 = Partition() + _elem755.read(iprot) + self.success.append(_elem755) iprot.readListEnd() else: iprot.skip(ftype) @@ -17306,8 +17545,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter696 in self.success: - iter696.write(oprot) + for iter756 in self.success: + iter756.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17388,10 +17627,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype700, _size697) = iprot.readListBegin() - for _i701 in xrange(_size697): - _elem702 = iprot.readString(); - self.part_vals.append(_elem702) + (_etype760, _size757) = iprot.readListBegin() + for _i761 in xrange(_size757): + _elem762 = iprot.readString(); + self.part_vals.append(_elem762) iprot.readListEnd() else: iprot.skip(ftype) @@ -17421,8 +17660,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter703 in self.part_vals: - oprot.writeString(iter703) + for iter763 in self.part_vals: + oprot.writeString(iter763) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -17486,10 +17725,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype707, _size704) = iprot.readListBegin() - for _i708 in xrange(_size704): - _elem709 = iprot.readString(); - self.success.append(_elem709) + (_etype767, _size764) = iprot.readListBegin() + for _i768 in xrange(_size764): + _elem769 = iprot.readString(); + self.success.append(_elem769) iprot.readListEnd() else: iprot.skip(ftype) @@ -17518,8 +17757,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter710 in self.success: - oprot.writeString(iter710) + for iter770 in self.success: + oprot.writeString(iter770) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17690,11 +17929,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype714, _size711) = iprot.readListBegin() - for _i715 in xrange(_size711): - _elem716 = Partition() - _elem716.read(iprot) - self.success.append(_elem716) + (_etype774, _size771) = iprot.readListBegin() + for _i775 in xrange(_size771): + _elem776 = Partition() + _elem776.read(iprot) + self.success.append(_elem776) iprot.readListEnd() else: iprot.skip(ftype) @@ -17723,8 +17962,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter717 in self.success: - iter717.write(oprot) + for iter777 in self.success: + iter777.write(oprot) oprot.writeListEnd() 
oprot.writeFieldEnd() if self.o1 is not None: @@ -17895,11 +18134,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype721, _size718) = iprot.readListBegin() - for _i722 in xrange(_size718): - _elem723 = PartitionSpec() - _elem723.read(iprot) - self.success.append(_elem723) + (_etype781, _size778) = iprot.readListBegin() + for _i782 in xrange(_size778): + _elem783 = PartitionSpec() + _elem783.read(iprot) + self.success.append(_elem783) iprot.readListEnd() else: iprot.skip(ftype) @@ -17928,8 +18167,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter724 in self.success: - iter724.write(oprot) + for iter784 in self.success: + iter784.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18166,10 +18405,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype728, _size725) = iprot.readListBegin() - for _i729 in xrange(_size725): - _elem730 = iprot.readString(); - self.names.append(_elem730) + (_etype788, _size785) = iprot.readListBegin() + for _i789 in xrange(_size785): + _elem790 = iprot.readString(); + self.names.append(_elem790) iprot.readListEnd() else: iprot.skip(ftype) @@ -18194,8 +18433,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter731 in self.names: - oprot.writeString(iter731) + for iter791 in self.names: + oprot.writeString(iter791) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18254,11 +18493,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype735, _size732) = iprot.readListBegin() - for _i736 in xrange(_size732): - _elem737 = Partition() - _elem737.read(iprot) - self.success.append(_elem737) + (_etype795, _size792) = iprot.readListBegin() + for _i796 in xrange(_size792): + _elem797 = Partition() + _elem797.read(iprot) + self.success.append(_elem797) iprot.readListEnd() else: iprot.skip(ftype) @@ -18287,8 +18526,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter738 in self.success: - iter738.write(oprot) + for iter798 in self.success: + iter798.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18538,11 +18777,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype742, _size739) = iprot.readListBegin() - for _i743 in xrange(_size739): - _elem744 = Partition() - _elem744.read(iprot) - self.new_parts.append(_elem744) + (_etype802, _size799) = iprot.readListBegin() + for _i803 in xrange(_size799): + _elem804 = Partition() + _elem804.read(iprot) + self.new_parts.append(_elem804) iprot.readListEnd() else: iprot.skip(ftype) @@ -18567,8 +18806,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter745 in self.new_parts: - iter745.write(oprot) + for iter805 in self.new_parts: + iter805.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18907,10 +19146,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype749, _size746) = iprot.readListBegin() - for _i750 in xrange(_size746): - _elem751 = 
iprot.readString(); - self.part_vals.append(_elem751) + (_etype809, _size806) = iprot.readListBegin() + for _i810 in xrange(_size806): + _elem811 = iprot.readString(); + self.part_vals.append(_elem811) iprot.readListEnd() else: iprot.skip(ftype) @@ -18941,8 +19180,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter752 in self.part_vals: - oprot.writeString(iter752) + for iter812 in self.part_vals: + oprot.writeString(iter812) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -19084,10 +19323,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype756, _size753) = iprot.readListBegin() - for _i757 in xrange(_size753): - _elem758 = iprot.readString(); - self.part_vals.append(_elem758) + (_etype816, _size813) = iprot.readListBegin() + for _i817 in xrange(_size813): + _elem818 = iprot.readString(); + self.part_vals.append(_elem818) iprot.readListEnd() else: iprot.skip(ftype) @@ -19109,8 +19348,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter759 in self.part_vals: - oprot.writeString(iter759) + for iter819 in self.part_vals: + oprot.writeString(iter819) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -19468,10 +19707,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype763, _size760) = iprot.readListBegin() - for _i764 in xrange(_size760): - _elem765 = iprot.readString(); - self.success.append(_elem765) + (_etype823, _size820) = iprot.readListBegin() + for _i824 in xrange(_size820): + _elem825 = iprot.readString(); + self.success.append(_elem825) iprot.readListEnd() else: iprot.skip(ftype) @@ -19494,8 +19733,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter766 in self.success: - oprot.writeString(iter766) + for iter826 in self.success: + oprot.writeString(iter826) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19619,11 +19858,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype768, _vtype769, _size767 ) = iprot.readMapBegin() - for _i771 in xrange(_size767): - _key772 = iprot.readString(); - _val773 = iprot.readString(); - self.success[_key772] = _val773 + (_ktype828, _vtype829, _size827 ) = iprot.readMapBegin() + for _i831 in xrange(_size827): + _key832 = iprot.readString(); + _val833 = iprot.readString(); + self.success[_key832] = _val833 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19646,9 +19885,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter774,viter775 in self.success.items(): - oprot.writeString(kiter774) - oprot.writeString(viter775) + for kiter834,viter835 in self.success.items(): + oprot.writeString(kiter834) + oprot.writeString(viter835) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19724,11 +19963,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype777, _vtype778, _size776 ) = iprot.readMapBegin() - for _i780 in xrange(_size776): - _key781 = iprot.readString(); - _val782 = iprot.readString(); - 
self.part_vals[_key781] = _val782 + (_ktype837, _vtype838, _size836 ) = iprot.readMapBegin() + for _i840 in xrange(_size836): + _key841 = iprot.readString(); + _val842 = iprot.readString(); + self.part_vals[_key841] = _val842 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19758,9 +19997,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter783,viter784 in self.part_vals.items(): - oprot.writeString(kiter783) - oprot.writeString(viter784) + for kiter843,viter844 in self.part_vals.items(): + oprot.writeString(kiter843) + oprot.writeString(viter844) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -19974,11 +20213,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype786, _vtype787, _size785 ) = iprot.readMapBegin() - for _i789 in xrange(_size785): - _key790 = iprot.readString(); - _val791 = iprot.readString(); - self.part_vals[_key790] = _val791 + (_ktype846, _vtype847, _size845 ) = iprot.readMapBegin() + for _i849 in xrange(_size845): + _key850 = iprot.readString(); + _val851 = iprot.readString(); + self.part_vals[_key850] = _val851 iprot.readMapEnd() else: iprot.skip(ftype) @@ -20008,9 +20247,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter792,viter793 in self.part_vals.items(): - oprot.writeString(kiter792) - oprot.writeString(viter793) + for kiter852,viter853 in self.part_vals.items(): + oprot.writeString(kiter852) + oprot.writeString(viter853) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -21065,11 +21304,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype797, _size794) = iprot.readListBegin() - for _i798 in xrange(_size794): - _elem799 = Index() - _elem799.read(iprot) - self.success.append(_elem799) + (_etype857, _size854) = iprot.readListBegin() + for _i858 in xrange(_size854): + _elem859 = Index() + _elem859.read(iprot) + self.success.append(_elem859) iprot.readListEnd() else: iprot.skip(ftype) @@ -21098,8 +21337,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter800 in self.success: - iter800.write(oprot) + for iter860 in self.success: + iter860.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21254,10 +21493,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype804, _size801) = iprot.readListBegin() - for _i805 in xrange(_size801): - _elem806 = iprot.readString(); - self.success.append(_elem806) + (_etype864, _size861) = iprot.readListBegin() + for _i865 in xrange(_size861): + _elem866 = iprot.readString(); + self.success.append(_elem866) iprot.readListEnd() else: iprot.skip(ftype) @@ -21280,8 +21519,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter807 in self.success: - oprot.writeString(iter807) + for iter867 in self.success: + oprot.writeString(iter867) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -23829,10 +24068,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype811, _size808) = 
iprot.readListBegin() - for _i812 in xrange(_size808): - _elem813 = iprot.readString(); - self.success.append(_elem813) + (_etype871, _size868) = iprot.readListBegin() + for _i872 in xrange(_size868): + _elem873 = iprot.readString(); + self.success.append(_elem873) iprot.readListEnd() else: iprot.skip(ftype) @@ -23855,8 +24094,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter814 in self.success: - oprot.writeString(iter814) + for iter874 in self.success: + oprot.writeString(iter874) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24544,10 +24783,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype818, _size815) = iprot.readListBegin() - for _i819 in xrange(_size815): - _elem820 = iprot.readString(); - self.success.append(_elem820) + (_etype878, _size875) = iprot.readListBegin() + for _i879 in xrange(_size875): + _elem880 = iprot.readString(); + self.success.append(_elem880) iprot.readListEnd() else: iprot.skip(ftype) @@ -24570,8 +24809,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter821 in self.success: - oprot.writeString(iter821) + for iter881 in self.success: + oprot.writeString(iter881) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25085,11 +25324,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype825, _size822) = iprot.readListBegin() - for _i826 in xrange(_size822): - _elem827 = Role() - _elem827.read(iprot) - self.success.append(_elem827) + (_etype885, _size882) = iprot.readListBegin() + for _i886 in xrange(_size882): + _elem887 = Role() + _elem887.read(iprot) + self.success.append(_elem887) iprot.readListEnd() else: iprot.skip(ftype) @@ -25112,8 +25351,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter828 in self.success: - iter828.write(oprot) + for iter888 in self.success: + iter888.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25622,10 +25861,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype832, _size829) = iprot.readListBegin() - for _i833 in xrange(_size829): - _elem834 = iprot.readString(); - self.group_names.append(_elem834) + (_etype892, _size889) = iprot.readListBegin() + for _i893 in xrange(_size889): + _elem894 = iprot.readString(); + self.group_names.append(_elem894) iprot.readListEnd() else: iprot.skip(ftype) @@ -25650,8 +25889,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter835 in self.group_names: - oprot.writeString(iter835) + for iter895 in self.group_names: + oprot.writeString(iter895) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25878,11 +26117,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype839, _size836) = iprot.readListBegin() - for _i840 in xrange(_size836): - _elem841 = HiveObjectPrivilege() - _elem841.read(iprot) - self.success.append(_elem841) + (_etype899, _size896) = iprot.readListBegin() + for _i900 in xrange(_size896): + _elem901 = HiveObjectPrivilege() + _elem901.read(iprot) + 
self.success.append(_elem901) iprot.readListEnd() else: iprot.skip(ftype) @@ -25905,8 +26144,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter842 in self.success: - iter842.write(oprot) + for iter902 in self.success: + iter902.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26404,10 +26643,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = iprot.readString(); - self.group_names.append(_elem848) + (_etype906, _size903) = iprot.readListBegin() + for _i907 in xrange(_size903): + _elem908 = iprot.readString(); + self.group_names.append(_elem908) iprot.readListEnd() else: iprot.skip(ftype) @@ -26428,8 +26667,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter849 in self.group_names: - oprot.writeString(iter849) + for iter909 in self.group_names: + oprot.writeString(iter909) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26484,10 +26723,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype853, _size850) = iprot.readListBegin() - for _i854 in xrange(_size850): - _elem855 = iprot.readString(); - self.success.append(_elem855) + (_etype913, _size910) = iprot.readListBegin() + for _i914 in xrange(_size910): + _elem915 = iprot.readString(); + self.success.append(_elem915) iprot.readListEnd() else: iprot.skip(ftype) @@ -26510,8 +26749,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter856 in self.success: - oprot.writeString(iter856) + for iter916 in self.success: + oprot.writeString(iter916) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29264,3 +29503,619 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +class flushCache_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('flushCache_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class flushCache_result: + + thrift_spec = ( + ) + + def 
read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('flushCache_result') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_file_metadata_by_expr_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataByExprRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_file_metadata_by_expr_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_file_metadata_by_expr_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and 
self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetFileMetadataByExprResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_file_metadata_by_expr_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_file_metadata_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_file_metadata_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, 
iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class put_file_metadata_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = PutFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('put_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class put_file_metadata_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (PutFileMetadataResult, 
PutFileMetadataResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = PutFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('put_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class clear_file_metadata_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = ClearFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clear_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class 
clear_file_metadata_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ClearFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clear_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 0b80390..7fcdd7e 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -9750,6 +9750,726 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class MetadataPpdResult: + """ + Attributes: + - metadata + - includeBitset + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'metadata', None, None, ), # 1 + (2, TType.STRING, 'includeBitset', None, None, ), # 2 + ) + + def __init__(self, metadata=None, includeBitset=None,): + self.metadata = metadata + self.includeBitset = includeBitset + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.metadata = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.includeBitset = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, 
self.thrift_spec))) + return + oprot.writeStructBegin('MetadataPpdResult') + if self.metadata is not None: + oprot.writeFieldBegin('metadata', TType.STRING, 1) + oprot.writeString(self.metadata) + oprot.writeFieldEnd() + if self.includeBitset is not None: + oprot.writeFieldBegin('includeBitset', TType.STRING, 2) + oprot.writeString(self.includeBitset) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.metadata is None: + raise TProtocol.TProtocolException(message='Required field metadata is unset!') + if self.includeBitset is None: + raise TProtocol.TProtocolException(message='Required field includeBitset is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.metadata) + value = (value * 31) ^ hash(self.includeBitset) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetFileMetadataByExprResult: + """ + Attributes: + - metadata + - isSupported + - unknownFileIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.MAP, 'metadata', (TType.I64,None,TType.STRUCT,(MetadataPpdResult, MetadataPpdResult.thrift_spec)), None, ), # 1 + (2, TType.BOOL, 'isSupported', None, None, ), # 2 + (3, TType.LIST, 'unknownFileIds', (TType.I64,None), None, ), # 3 + ) + + def __init__(self, metadata=None, isSupported=None, unknownFileIds=None,): + self.metadata = metadata + self.isSupported = isSupported + self.unknownFileIds = unknownFileIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.metadata = {} + (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin() + for _i466 in xrange(_size462): + _key467 = iprot.readI64(); + _val468 = MetadataPpdResult() + _val468.read(iprot) + self.metadata[_key467] = _val468 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isSupported = iprot.readBool(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.unknownFileIds = [] + (_etype472, _size469) = iprot.readListBegin() + for _i473 in xrange(_size469): + _elem474 = iprot.readI64(); + self.unknownFileIds.append(_elem474) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetFileMetadataByExprResult') + if self.metadata is not None: + oprot.writeFieldBegin('metadata', TType.MAP, 1) + oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) + for kiter475,viter476 in self.metadata.items(): + oprot.writeI64(kiter475) + viter476.write(oprot) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if 
self.isSupported is not None: + oprot.writeFieldBegin('isSupported', TType.BOOL, 2) + oprot.writeBool(self.isSupported) + oprot.writeFieldEnd() + if self.unknownFileIds is not None: + oprot.writeFieldBegin('unknownFileIds', TType.LIST, 3) + oprot.writeListBegin(TType.I64, len(self.unknownFileIds)) + for iter477 in self.unknownFileIds: + oprot.writeI64(iter477) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.metadata is None: + raise TProtocol.TProtocolException(message='Required field metadata is unset!') + if self.isSupported is None: + raise TProtocol.TProtocolException(message='Required field isSupported is unset!') + if self.unknownFileIds is None: + raise TProtocol.TProtocolException(message='Required field unknownFileIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.metadata) + value = (value * 31) ^ hash(self.isSupported) + value = (value * 31) ^ hash(self.unknownFileIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetFileMetadataByExprRequest: + """ + Attributes: + - fileIds + - expr + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'fileIds', (TType.I64,None), None, ), # 1 + (2, TType.STRING, 'expr', None, None, ), # 2 + ) + + def __init__(self, fileIds=None, expr=None,): + self.fileIds = fileIds + self.expr = expr + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype481, _size478) = iprot.readListBegin() + for _i482 in xrange(_size478): + _elem483 = iprot.readI64(); + self.fileIds.append(_elem483) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.expr = iprot.readString(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetFileMetadataByExprRequest') + if self.fileIds is not None: + oprot.writeFieldBegin('fileIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter484 in self.fileIds: + oprot.writeI64(iter484) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.expr is not None: + oprot.writeFieldBegin('expr', TType.STRING, 2) + oprot.writeString(self.expr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocol.TProtocolException(message='Required field fileIds is unset!') + if self.expr is None: + raise TProtocol.TProtocolException(message='Required field expr is unset!') + return + + + def __hash__(self): + 
value = 17 + value = (value * 31) ^ hash(self.fileIds) + value = (value * 31) ^ hash(self.expr) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetFileMetadataResult: + """ + Attributes: + - metadata + - isSupported + """ + + thrift_spec = ( + None, # 0 + (1, TType.MAP, 'metadata', (TType.I64,None,TType.STRING,None), None, ), # 1 + (2, TType.BOOL, 'isSupported', None, None, ), # 2 + ) + + def __init__(self, metadata=None, isSupported=None,): + self.metadata = metadata + self.isSupported = isSupported + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.MAP: + self.metadata = {} + (_ktype486, _vtype487, _size485 ) = iprot.readMapBegin() + for _i489 in xrange(_size485): + _key490 = iprot.readI64(); + _val491 = iprot.readString(); + self.metadata[_key490] = _val491 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.isSupported = iprot.readBool(); + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetFileMetadataResult') + if self.metadata is not None: + oprot.writeFieldBegin('metadata', TType.MAP, 1) + oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) + for kiter492,viter493 in self.metadata.items(): + oprot.writeI64(kiter492) + oprot.writeString(viter493) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.isSupported is not None: + oprot.writeFieldBegin('isSupported', TType.BOOL, 2) + oprot.writeBool(self.isSupported) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.metadata is None: + raise TProtocol.TProtocolException(message='Required field metadata is unset!') + if self.isSupported is None: + raise TProtocol.TProtocolException(message='Required field isSupported is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.metadata) + value = (value * 31) ^ hash(self.isSupported) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetFileMetadataRequest: + """ + Attributes: + - fileIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'fileIds', (TType.I64,None), None, ), # 1 + ) + + def __init__(self, fileIds=None,): + self.fileIds = fileIds + + def read(self, iprot): + if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype497, _size494) = iprot.readListBegin() + for _i498 in xrange(_size494): + _elem499 = iprot.readI64(); + self.fileIds.append(_elem499) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetFileMetadataRequest') + if self.fileIds is not None: + oprot.writeFieldBegin('fileIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter500 in self.fileIds: + oprot.writeI64(iter500) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocol.TProtocolException(message='Required field fileIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fileIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class PutFileMetadataResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('PutFileMetadataResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class PutFileMetadataRequest: + """ + Attributes: + - fileIds + - metadata + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'fileIds', (TType.I64,None), None, ), # 1 + (2, TType.LIST, 'metadata', (TType.STRING,None), None, ), # 2 + ) + + def __init__(self, fileIds=None, metadata=None,): + self.fileIds = fileIds + self.metadata = metadata + + def read(self, 
iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype504, _size501) = iprot.readListBegin() + for _i505 in xrange(_size501): + _elem506 = iprot.readI64(); + self.fileIds.append(_elem506) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.metadata = [] + (_etype510, _size507) = iprot.readListBegin() + for _i511 in xrange(_size507): + _elem512 = iprot.readString(); + self.metadata.append(_elem512) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('PutFileMetadataRequest') + if self.fileIds is not None: + oprot.writeFieldBegin('fileIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter513 in self.fileIds: + oprot.writeI64(iter513) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.metadata is not None: + oprot.writeFieldBegin('metadata', TType.LIST, 2) + oprot.writeListBegin(TType.STRING, len(self.metadata)) + for iter514 in self.metadata: + oprot.writeString(iter514) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocol.TProtocolException(message='Required field fileIds is unset!') + if self.metadata is None: + raise TProtocol.TProtocolException(message='Required field metadata is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fileIds) + value = (value * 31) ^ hash(self.metadata) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ClearFileMetadataResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ClearFileMetadataResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return 
value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class ClearFileMetadataRequest: + """ + Attributes: + - fileIds + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'fileIds', (TType.I64,None), None, ), # 1 + ) + + def __init__(self, fileIds=None,): + self.fileIds = fileIds + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fileIds = [] + (_etype518, _size515) = iprot.readListBegin() + for _i519 in xrange(_size515): + _elem520 = iprot.readI64(); + self.fileIds.append(_elem520) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ClearFileMetadataRequest') + if self.fileIds is not None: + oprot.writeFieldBegin('fileIds', TType.LIST, 1) + oprot.writeListBegin(TType.I64, len(self.fileIds)) + for iter521 in self.fileIds: + oprot.writeI64(iter521) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.fileIds is None: + raise TProtocol.TProtocolException(message='Required field fileIds is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fileIds) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetAllFunctionsResponse: """ Attributes: @@ -9776,11 +10496,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype465, _size462) = iprot.readListBegin() - for _i466 in xrange(_size462): - _elem467 = Function() - _elem467.read(iprot) - self.functions.append(_elem467) + (_etype525, _size522) = iprot.readListBegin() + for _i526 in xrange(_size522): + _elem527 = Function() + _elem527.read(iprot) + self.functions.append(_elem527) iprot.readListEnd() else: iprot.skip(ftype) @@ -9797,8 +10517,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter468 in self.functions: - iter468.write(oprot) + for iter528 in self.functions: + iter528.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 4bd4302..771de51 100644 --- 
a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2231,6 +2231,173 @@ class FireEventResponse ::Thrift::Struct.generate_accessors self end +class MetadataPpdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + METADATA = 1 + INCLUDEBITSET = 2 + + FIELDS = { + METADATA => {:type => ::Thrift::Types::STRING, :name => 'metadata', :binary => true}, + INCLUDEBITSET => {:type => ::Thrift::Types::STRING, :name => 'includeBitset', :binary => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field metadata is unset!') unless @metadata + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field includeBitset is unset!') unless @includeBitset + end + + ::Thrift::Struct.generate_accessors self +end + +class GetFileMetadataByExprResult + include ::Thrift::Struct, ::Thrift::Struct_Union + METADATA = 1 + ISSUPPORTED = 2 + UNKNOWNFILEIDS = 3 + + FIELDS = { + METADATA => {:type => ::Thrift::Types::MAP, :name => 'metadata', :key => {:type => ::Thrift::Types::I64}, :value => {:type => ::Thrift::Types::STRUCT, :class => ::MetadataPpdResult}}, + ISSUPPORTED => {:type => ::Thrift::Types::BOOL, :name => 'isSupported'}, + UNKNOWNFILEIDS => {:type => ::Thrift::Types::LIST, :name => 'unknownFileIds', :element => {:type => ::Thrift::Types::I64}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field metadata is unset!') unless @metadata + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field isSupported is unset!') if @isSupported.nil? + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field unknownFileIds is unset!') unless @unknownFileIds + end + + ::Thrift::Struct.generate_accessors self +end + +class GetFileMetadataByExprRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + FILEIDS = 1 + EXPR = 2 + + FIELDS = { + FILEIDS => {:type => ::Thrift::Types::LIST, :name => 'fileIds', :element => {:type => ::Thrift::Types::I64}}, + EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fileIds is unset!') unless @fileIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field expr is unset!') unless @expr + end + + ::Thrift::Struct.generate_accessors self +end + +class GetFileMetadataResult + include ::Thrift::Struct, ::Thrift::Struct_Union + METADATA = 1 + ISSUPPORTED = 2 + + FIELDS = { + METADATA => {:type => ::Thrift::Types::MAP, :name => 'metadata', :key => {:type => ::Thrift::Types::I64}, :value => {:type => ::Thrift::Types::STRING, :binary => true}}, + ISSUPPORTED => {:type => ::Thrift::Types::BOOL, :name => 'isSupported'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field metadata is unset!') unless @metadata + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field isSupported is unset!') if @isSupported.nil? 
+ end + + ::Thrift::Struct.generate_accessors self +end + +class GetFileMetadataRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + FILEIDS = 1 + + FIELDS = { + FILEIDS => {:type => ::Thrift::Types::LIST, :name => 'fileIds', :element => {:type => ::Thrift::Types::I64}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fileIds is unset!') unless @fileIds + end + + ::Thrift::Struct.generate_accessors self +end + +class PutFileMetadataResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class PutFileMetadataRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + FILEIDS = 1 + METADATA = 2 + + FIELDS = { + FILEIDS => {:type => ::Thrift::Types::LIST, :name => 'fileIds', :element => {:type => ::Thrift::Types::I64}}, + METADATA => {:type => ::Thrift::Types::LIST, :name => 'metadata', :element => {:type => ::Thrift::Types::STRING, :binary => true}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fileIds is unset!') unless @fileIds + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field metadata is unset!') unless @metadata + end + + ::Thrift::Struct.generate_accessors self +end + +class ClearFileMetadataResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class ClearFileMetadataRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + FILEIDS = 1 + + FIELDS = { + FILEIDS => {:type => ::Thrift::Types::LIST, :name => 'fileIds', :element => {:type => ::Thrift::Types::I64}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field fileIds is unset!') unless @fileIds + end + + ::Thrift::Struct.generate_accessors self +end + class GetAllFunctionsResponse include ::Thrift::Struct, ::Thrift::Struct_Union FUNCTIONS = 1 diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 73a1d20..8625c7b 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2090,6 +2090,80 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'fire_listener_event failed: unknown result') end + def flushCache() + send_flushCache() + recv_flushCache() + end + + def send_flushCache() + send_message('flushCache', FlushCache_args) + end + + def recv_flushCache() + result = receive_message(FlushCache_result) + return + end + + def get_file_metadata_by_expr(req) + send_get_file_metadata_by_expr(req) + return recv_get_file_metadata_by_expr() + end + + def send_get_file_metadata_by_expr(req) + send_message('get_file_metadata_by_expr', Get_file_metadata_by_expr_args, :req => req) + end + + def recv_get_file_metadata_by_expr() + result = receive_message(Get_file_metadata_by_expr_result) + return result.success unless result.success.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_file_metadata_by_expr failed: unknown result') + end + + def get_file_metadata(req) + send_get_file_metadata(req) + return recv_get_file_metadata() + end + + def send_get_file_metadata(req) + send_message('get_file_metadata', Get_file_metadata_args, :req => req) + end + + def recv_get_file_metadata() + result = receive_message(Get_file_metadata_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_file_metadata failed: unknown result') + end + + def put_file_metadata(req) + send_put_file_metadata(req) + return recv_put_file_metadata() + end + + def send_put_file_metadata(req) + send_message('put_file_metadata', Put_file_metadata_args, :req => req) + end + + def recv_put_file_metadata() + result = receive_message(Put_file_metadata_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'put_file_metadata failed: unknown result') + end + + def clear_file_metadata(req) + send_clear_file_metadata(req) + return recv_clear_file_metadata() + end + + def send_clear_file_metadata(req) + send_message('clear_file_metadata', Clear_file_metadata_args, :req => req) + end + + def recv_clear_file_metadata() + result = receive_message(Clear_file_metadata_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'clear_file_metadata failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -3681,6 +3755,41 @@ module ThriftHiveMetastore write_result(result, oprot, 'fire_listener_event', seqid) end + def process_flushCache(seqid, iprot, oprot) + args = read_args(iprot, FlushCache_args) + result = FlushCache_result.new() + @handler.flushCache() + write_result(result, oprot, 'flushCache', seqid) + end + + def process_get_file_metadata_by_expr(seqid, iprot, oprot) + args = read_args(iprot, Get_file_metadata_by_expr_args) + result = Get_file_metadata_by_expr_result.new() + result.success = @handler.get_file_metadata_by_expr(args.req) + write_result(result, oprot, 'get_file_metadata_by_expr', seqid) + end + + def process_get_file_metadata(seqid, iprot, oprot) + args = read_args(iprot, Get_file_metadata_args) + result = Get_file_metadata_result.new() + result.success = @handler.get_file_metadata(args.req) + write_result(result, oprot, 'get_file_metadata', seqid) + end + + def process_put_file_metadata(seqid, iprot, oprot) + args = read_args(iprot, Put_file_metadata_args) + result = Put_file_metadata_result.new() + result.success = @handler.put_file_metadata(args.req) + write_result(result, oprot, 'put_file_metadata', seqid) + end + + def process_clear_file_metadata(seqid, iprot, oprot) + args = read_args(iprot, Clear_file_metadata_args) + result = Clear_file_metadata_result.new() + result.success = @handler.clear_file_metadata(args.req) + write_result(result, oprot, 'clear_file_metadata', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -8415,5 +8524,163 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class FlushCache_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FlushCache_result + include ::Thrift::Struct, 
::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_file_metadata_by_expr_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetFileMetadataByExprRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_file_metadata_by_expr_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetFileMetadataByExprResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_file_metadata_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetFileMetadataRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_file_metadata_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetFileMetadataResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Put_file_metadata_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::PutFileMetadataRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Put_file_metadata_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::PutFileMetadataResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Clear_file_metadata_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::ClearFileMetadataRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Clear_file_metadata_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::ClearFileMetadataResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index ee20430..0082773 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -17,12 +17,7 @@ */ package org.apache.hadoop.hive.metastore; -import java.io.IOException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - +import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -48,7 +43,11 @@ import 
org.apache.hadoop.ipc.RemoteException; import org.apache.hive.common.util.HiveStringUtils; -import com.google.common.collect.Lists; +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; /** * Hive specific implementation of alter @@ -121,8 +120,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, // get old table oldt = msdb.getTable(dbname, name); if (oldt == null) { - throw new InvalidOperationException("table " + newt.getDbName() + "." - + newt.getTableName() + " doesn't exist"); + throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist"); } if (HiveConf.getBoolVar(hiveConf, @@ -670,17 +668,19 @@ private void updateTableColumnStatsForAlterTable(RawStore msdb, Table oldTable, } List statsObjs = cs.getStatsObj(); - for (ColumnStatisticsObj statsObj : statsObjs) { - boolean found = false; - for (FieldSchema newCol : newCols) { - if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) - && statsObj.getColType().equals(newCol.getType())) { - found = true; - break; + if (statsObjs != null) { + for (ColumnStatisticsObj statsObj : statsObjs) { + boolean found = false; + for (FieldSchema newCol : newCols) { + if (statsObj.getColName().equalsIgnoreCase(newCol.getName()) + && statsObj.getColType().equals(newCol.getType())) { + found = true; + break; + } + } + if (!found) { + msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); } - } - if (!found) { - msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); } } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 1840e76..8eb08b8 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -52,6 +52,8 @@ import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; +import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -69,6 +71,10 @@ import org.apache.hadoop.hive.metastore.api.FireEventResponse; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; @@ -114,6 +120,8 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.PutFileMetadataResult; import 
org.apache.hadoop.hive.metastore.api.RequestPartsSpec; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; @@ -167,14 +175,6 @@ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MRole; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnHandler; import org.apache.hadoop.hive.serde2.Deserializer; @@ -208,6 +208,7 @@ import javax.jdo.JDOException; import java.io.IOException; +import java.nio.ByteBuffer; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.AbstractMap; @@ -292,8 +293,7 @@ public TTransport getTransport(TTransport trans) { } } - public static class HMSHandler extends FacebookBase implements - IHMSHandler { + public static class HMSHandler extends FacebookBase implements IHMSHandler { public static final Log LOG = HiveMetaStore.LOG; private String rawStoreClassName; private final HiveConf hiveConf; // stores datastore (jpox) properties, @@ -598,6 +598,19 @@ private RawStore newRawStore() throws MetaException { + rawStoreClassName)); Configuration conf = getConf(); + if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + LOG.info("Fastpath, skipping raw store proxy"); + try { + RawStore rs = ((Class) MetaStoreUtils.getClass( + rawStoreClassName)).newInstance(); + rs.setConf(conf); + return rs; + } catch (Exception e) { + LOG.fatal("Unable to instantiate raw store directly in fastpath mode"); + throw new RuntimeException(e); + } + } + return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get()); } @@ -1957,7 +1970,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab firePreEvent(new PreAddPartitionEvent(tbl, part, this)); - part.setSd(tbl.getSd()); + part.setSd(tbl.getSd().deepCopy()); partLocation = new Path(tbl.getSd().getLocation(), Warehouse .makePartName(tbl.getPartitionKeys(), part_vals)); part.getSd().setLocation(partLocation.toString()); @@ -2929,8 +2942,8 @@ public Partition get_partition(final String db_name, final String tbl_name, * Fire a pre-event for read table operation, if there are any * pre-event listeners registered * - * @param db_name - * @param tbl_name + * @param dbName + * @param tblName * @throws MetaException * @throws NoSuchObjectException */ @@ -4264,8 +4277,8 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) } try { ColumnStatistics cs = getMS().getTableColumnStatistics(dbName, tblName, lowerCaseColNames); - result = new TableStatsResult( - cs == null ? Lists.newArrayList() : cs.getStatsObj()); + result = new TableStatsResult((cs == null || cs.getStatsObj() == null) + ? 
Lists.newArrayList() : cs.getStatsObj()); } finally { endFunction("get_table_statistics_req: ", result == null, null, tblName); } @@ -4443,8 +4456,7 @@ public boolean delete_partition_column_statistics(String dbName, String tableNam @Override public boolean delete_table_column_statistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, - InvalidInputException - { + InvalidInputException { dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); @@ -4461,7 +4473,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S endFunction("delete_column_statistics_by_table: ", ret != false, null, tableName); } return ret; - } + } @Override public List get_partitions_by_filter(final String dbName, @@ -4726,9 +4738,9 @@ private boolean isNewRoleAParent(String newRole, String curRole) throws MetaExce return true; } //do this check recursively on all the parent roles of curRole - List parentRoleMaps = getMS().listRoles(curRole, PrincipalType.ROLE); - for(MRoleMap parentRole : parentRoleMaps){ - if(isNewRoleAParent(newRole, parentRole.getRole().getRoleName())){ + List parentRoleMaps = getMS().listRoles(curRole, PrincipalType.ROLE); + for(Role parentRole : parentRoleMaps){ + if(isNewRoleAParent(newRole, parentRole.getRoleName())){ return true; } } @@ -4740,26 +4752,9 @@ private boolean isNewRoleAParent(String newRole, String curRole) throws MetaExce final PrincipalType principalType) throws MetaException, TException { incrementCounter("list_roles"); firePreEvent(new PreAuthorizationCallEvent(this)); - List result = new ArrayList(); - try { - List roleMaps = getMS().listRoles(principalName, principalType); - if (roleMaps != null) { - for (MRoleMap roleMap : roleMaps) { - MRole mrole = roleMap.getRole(); - Role role = new Role(mrole.getRoleName(), mrole.getCreateTime(), mrole.getOwnerName()); - result.add(role); - } - } - return result; - } catch (MetaException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } + return getMS().listRoles(principalName, principalType); } - - @Override public boolean create_role(final Role role) throws MetaException, TException { @@ -5007,25 +5002,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listTableColumnGrantsAll(dbName, tableName, columnName); } - List mTableCols = getMS() + List result = getMS() .listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName); - if (mTableCols.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mTableCols.size(); i++) { - MTableColumnPrivilege sCol = mTableCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, null, sCol.getColumnName()); - HiveObjectPrivilege secObj = new HiveObjectPrivilege( - objectRef, sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol - .getGrantOption())); - result.add(secObj); - } return result; } catch (MetaException e) { throw e; @@ -5049,24 +5028,11 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); } - List mPartitionCols = getMS().listPrincipalPartitionColumnGrants( - principalName, - 
principalType, dbName, tableName, partName, columnName); - if (mPartitionCols.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mPartitionCols.size(); i++) { - MPartitionColumnPrivilege sCol = mPartitionCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, partValues, sCol.getColumnName()); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol.getGrantOption())); - result.add(secObj); - } + + List result = + getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, + tableName, partValues, partName, columnName); + return result; } catch (MetaException e) { throw e; @@ -5086,25 +5052,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } if (principalName == null) { return getMS().listDBGrantsAll(dbName); + } else { + return getMS().listPrincipalDBGrants(principalName, principalType, dbName); } - List mDbs = getMS().listPrincipalDBGrants( - principalName, principalType, dbName); - if (mDbs.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mDbs.size(); i++) { - MDBPrivilege sDB = mDbs.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.DATABASE, dbName, null, null, null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sDB.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sDB.getPrivilege(), sDB - .getCreateTime(), sDB.getGrantor(), PrincipalType - .valueOf(sDB.getGrantorType()), sDB.getGrantOption())); - result.add(secObj); - } - return result; } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5127,25 +5077,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listPartitionGrantsAll(dbName, tableName, partName); } - List mParts = getMS().listPrincipalPartitionGrants( - principalName, principalType, dbName, tableName, partName); - if (mParts.isEmpty()) { - return Collections. emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mParts.size(); i++) { - MPartitionPrivilege sPart = mParts.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.PARTITION, dbName, tableName, partValues, null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sPart.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sPart.getPrivilege(), sPart - .getCreateTime(), sPart.getGrantor(), PrincipalType - .valueOf(sPart.getGrantorType()), sPart - .getGrantOption())); - - result.add(secObj); - } + List result = getMS().listPrincipalPartitionGrants( + principalName, principalType, dbName, tableName, partValues, partName); + return result; } catch (MetaException e) { throw e; @@ -5167,23 +5101,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listTableGrantsAll(dbName, tableName); } - List mTbls = getMS() + List result = getMS() .listAllTableGrants(principalName, principalType, dbName, tableName); - if (mTbls.isEmpty()) { - return Collections. 
emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mTbls.size(); i++) { - MTablePrivilege sTbl = mTbls.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.TABLE, dbName, tableName, null, null); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sTbl.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl - .getGrantor(), PrincipalType.valueOf(sTbl - .getGrantorType()), sTbl.getGrantOption())); - result.add(secObj); - } + return result; } catch (MetaException e) { throw e; @@ -5201,23 +5121,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (principalName == null) { return getMS().listGlobalGrantsAll(); } - List mUsers = getMS().listPrincipalGlobalGrants( + List result = getMS().listPrincipalGlobalGrants( principalName, principalType); - if (mUsers.isEmpty()) { - return Collections. emptyList(); - } - List result = new ArrayList(); - for (int i = 0; i < mUsers.size(); i++) { - MGlobalPrivilege sUsr = mUsers.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.GLOBAL, null, null, null, null); - HiveObjectPrivilege secUser = new HiveObjectPrivilege( - objectRef, sUsr.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr - .getCreateTime(), sUsr.getGrantor(), PrincipalType - .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption())); - result.add(secUser); - } + return result; } catch (MetaException e) { throw e; @@ -5652,6 +5558,11 @@ public ShowCompactResponse show_compact(ShowCompactRequest rqst) throws TExcepti } @Override + public void flushCache() throws TException { + getMS().flushCache(); + } + + @Override public void add_dynamic_partitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, TException { getTxnHandler().addDynamicPartitions(rqst); @@ -5664,9 +5575,9 @@ public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleReq incrementCounter("get_principals_in_role"); firePreEvent(new PreAuthorizationCallEvent(this)); Exception ex = null; - List roleMaps = null; + GetPrincipalsInRoleResponse response = null; try { - roleMaps = getMS().listRoleMembers(request.getRoleName()); + response = new GetPrincipalsInRoleResponse(getMS().listRoleMembers(request.getRoleName())); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5675,7 +5586,7 @@ public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleReq } finally { endFunction("get_principals_in_role", ex == null, ex); } - return new GetPrincipalsInRoleResponse(getRolePrincipalGrants(roleMaps)); + return response; } @Override @@ -5685,9 +5596,9 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( incrementCounter("get_role_grants_for_principal"); firePreEvent(new PreAuthorizationCallEvent(this)); Exception ex = null; - List roleMaps = null; + List roleMaps = null; try { - roleMaps = getMS().listRoles(request.getPrincipal_name(), request.getPrincipal_type()); + roleMaps = getMS().listRolesWithGrants(request.getPrincipal_name(), request.getPrincipal_type()); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5697,31 +5608,20 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( endFunction("get_role_grants_for_principal", ex == null, ex); } - List roleGrantsList = getRolePrincipalGrants(roleMaps); - return new GetRoleGrantsForPrincipalResponse(roleGrantsList); + //List roleGrantsList = getRolePrincipalGrants(roleMaps); + 
return new GetRoleGrantsForPrincipalResponse(roleMaps); } /** * Convert each MRoleMap object into a thrift RolePrincipalGrant object - * @param roleMaps + * @param roles * @return */ - private List getRolePrincipalGrants(List roleMaps) { + private List getRolePrincipalGrants(List roles) throws MetaException { List rolePrinGrantList = new ArrayList(); - if (roleMaps != null) { - for (MRoleMap roleMap : roleMaps) { - RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( - roleMap.getRole().getRoleName(), - roleMap.getPrincipalName(), - PrincipalType.valueOf(roleMap.getPrincipalType()), - roleMap.getGrantOption(), - roleMap.getAddTime(), - roleMap.getGrantor(), - // no grantor type for public role, hence the null check - roleMap.getGrantorType() == null ? null - : PrincipalType.valueOf(roleMap.getGrantorType()) - ); - rolePrinGrantList.add(rolePrinGrant); + if (roles != null) { + for (Role role : roles) { + rolePrinGrantList.addAll(getMS().listRoleMembers(role.getRoleName())); } } return rolePrinGrantList; @@ -5796,6 +5696,40 @@ public FireEventResponse fire_listener_event(FireEventRequest rqst) throws TExce } } + + @Override + public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req) + throws TException { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws TException { + List fileIds = req.getFileIds(); + ByteBuffer[] metadatas = getMS().getFileMetadata(fileIds); + GetFileMetadataResult result = new GetFileMetadataResult(); + result.setIsSupported(metadatas != null); + if (metadatas != null) { + assert metadatas.length == fileIds.size(); + for (int i = 0; i < metadatas.length; ++i) { + result.putToMetadata(fileIds.get(i), metadatas[i]); + } + } + return result; + } + + @Override + public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws TException { + getMS().putFileMetadata(req.getFileIds(), req.getMetadata()); + return new PutFileMetadataResult(); + } + + @Override + public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) + throws TException { + getMS().putFileMetadata(req.getFileIds(), null); + return new ClearFileMetadataResult(); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 482f278..6f15fd0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -166,6 +166,7 @@ private URI metastoreUris[]; private final HiveMetaHookLoader hookLoader; protected final HiveConf conf; + protected boolean fastpath = false; private String tokenStrForm; private final boolean localMetaStore; private final MetaStoreFilterHook filterHook; @@ -200,10 +201,20 @@ public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) if (localMetaStore) { // instantiate the metastore server handler directly instead of connecting // through the network - client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true); + if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + client = new HiveMetaStore.HMSHandler("hive client", conf, true); + fastpath = true; + } else { + client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true); + } isConnected = true; snapshotActiveConf(); return; + } else { + if 
(conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + throw new RuntimeException("You can't set hive.metastore.fastpath to true when you're " + + "talking to the thrift metastore service. You must run the metastore locally."); + } } // get the number retries @@ -537,7 +548,8 @@ public Partition add_partition(Partition new_part) public Partition add_partition(Partition new_part, EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return deepCopy(client.add_partition_with_environment_context(new_part, envContext)); + Partition p = client.add_partition_with_environment_context(new_part, envContext); + return fastpath ? p : deepCopy(p); } /** @@ -597,8 +609,9 @@ public Partition appendPartition(String db_name, String table_name, public Partition appendPartition(String db_name, String table_name, List part_vals, EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return deepCopy(client.append_partition_with_environment_context(db_name, table_name, - part_vals, envContext)); + Partition p = client.append_partition_with_environment_context(db_name, table_name, + part_vals, envContext); + return fastpath ? p : deepCopy(p); } @Override @@ -610,8 +623,9 @@ public Partition appendPartition(String dbName, String tableName, String partNam public Partition appendPartition(String dbName, String tableName, String partName, EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return deepCopy(client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext)); + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? p : deepCopy(p); } /** @@ -1051,8 +1065,8 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException @Override public List listPartitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(filterHook.filterPartitions( - client.get_partitions(db_name, tbl_name, max_parts))); + List parts = client.get_partitions(db_name, tbl_name, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override @@ -1065,16 +1079,17 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public List listPartitions(String db_name, String tbl_name, List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(filterHook.filterPartitions( - client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts))); + List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public List listPartitionsWithAuthInfo(String db_name, String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(filterHook.filterPartitions( - client.get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names))); + List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + user_name, group_names); + return fastpath ? 
parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override @@ -1082,8 +1097,9 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String tbl_name, List part_vals, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(filterHook.filterPartitions(client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names))); + List parts = client.get_partitions_ps_with_auth(db_name, + tbl_name, part_vals, max_parts, user_name, group_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } /** @@ -1104,8 +1120,8 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public List listPartitionsByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException { - return deepCopyPartitions(filterHook.filterPartitions( - client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts))); + List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override @@ -1141,9 +1157,13 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr throw new IncompatibleMetastoreException( "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); } - r.setPartitions(filterHook.filterPartitions(r.getPartitions())); - // TODO: in these methods, do we really need to deepcopy? - deepCopyPartitions(r.getPartitions(), result); + if (fastpath) { + result.addAll(r.getPartitions()); + } else { + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); + } return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. } @@ -1159,7 +1179,8 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr @Override public Database getDatabase(String name) throws NoSuchObjectException, MetaException, TException { - return deepCopy(filterHook.filterDatabase(client.get_database(name))); + Database d = client.get_database(name); + return fastpath ? d :deepCopy(filterHook.filterDatabase(d)); } /** @@ -1175,15 +1196,15 @@ public Database getDatabase(String name) throws NoSuchObjectException, @Override public Partition getPartition(String db_name, String tbl_name, List part_vals) throws NoSuchObjectException, MetaException, TException { - return deepCopy(filterHook.filterPartition( - client.get_partition(db_name, tbl_name, part_vals))); + Partition p = client.get_partition(db_name, tbl_name, part_vals); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } @Override public List getPartitionsByNames(String db_name, String tbl_name, List part_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(filterHook.filterPartitions( - client.get_partitions_by_names(db_name, tbl_name, part_names))); + List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); + return fastpath ? 
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override @@ -1191,8 +1212,9 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws MetaException, UnknownTableException, NoSuchObjectException, TException { - return deepCopy(filterHook.filterPartition(client.get_partition_with_auth(db_name, - tbl_name, part_vals, user_name, group_names))); + Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, + group_names); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } /** @@ -1209,7 +1231,8 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, @Override public Table getTable(String dbname, String name) throws MetaException, TException, NoSuchObjectException { - return deepCopy(filterHook.filterTable(client.get_table(dbname, name))); + Table t = client.get_table(dbname, name); + return fastpath ? t : deepCopy(filterHook.filterTable(t)); } /** {@inheritDoc} */ @@ -1217,15 +1240,16 @@ public Table getTable(String dbname, String name) throws MetaException, @Deprecated public Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException { - return filterHook.filterTable(getTable(DEFAULT_DATABASE_NAME, tableName)); + Table t = getTable(DEFAULT_DATABASE_NAME, tableName); + return fastpath ? t : filterHook.filterTable(t); } /** {@inheritDoc} */ @Override public List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException { - return deepCopyTables(filterHook.filterTables( - client.get_table_objects_by_name(dbName, tableNames))); + List<Table>
tabs = client.get_table_objects_by_name(dbName, tableNames); + return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); } /** {@inheritDoc} */ @@ -1334,7 +1358,8 @@ public void alterDatabase(String dbName, Database db) public List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException { - return deepCopyFieldSchemas(client.get_fields(db, tableName)); + List fields = client.get_fields(db, tableName); + return fastpath ? fields : deepCopyFieldSchemas(fields); } /** @@ -1442,6 +1467,16 @@ public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) return client.set_aggr_stats_for(request); } + @Override + public void flushCache() { + try { + client.flushCache(); + } catch (TException e) { + // Not much we can do about it honestly + LOG.warn("Got error flushing the cache", e); + } + } + /** {@inheritDoc} */ @Override public List getTableColumnStatistics(String dbName, String tableName, @@ -1500,7 +1535,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri envCxt = new EnvironmentContext(props); } - return deepCopyFieldSchemas(client.get_schema_with_environment_context(db, tableName, envCxt)); + List fields = client.get_schema_with_environment_context(db, tableName, envCxt); + return fastpath ? fields : deepCopyFieldSchemas(fields); } @Override @@ -1512,8 +1548,8 @@ public String getConfigValue(String name, String defaultValue) @Override public Partition getPartition(String db, String tableName, String partName) throws MetaException, TException, UnknownTableException, NoSuchObjectException { - return deepCopy( - filterHook.filterPartition(client.get_partition_by_name(db, tableName, partName))); + Partition p = client.get_partition_by_name(db, tableName, partName); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); } public Partition appendPartitionByName(String dbName, String tableName, String partName) @@ -1524,8 +1560,9 @@ public Partition appendPartitionByName(String dbName, String tableName, String p public Partition appendPartitionByName(String dbName, String tableName, String partName, EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException, TException { - return deepCopy(client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext)); + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? p : deepCopy(p); } public boolean dropPartitionByName(String dbName, String tableName, String partName, @@ -2036,7 +2073,8 @@ public void dropFunction(String dbName, String funcName) @Override public Function getFunction(String dbName, String funcName) throws MetaException, TException { - return deepCopy(client.get_function(dbName, funcName)); + Function f = client.get_function(dbName, funcName); + return fastpath ? 
f : deepCopy(f); } @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 5fde6d3..e4a6cdb 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -1453,4 +1453,10 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partName) throws NoSuchObjectException, MetaException, TException; boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; + + /** + * Flush any catalog objects held by the metastore implementation. Note that this does not + * flush statistics objects. This should be called at the beginning of each query. + */ + void flushCache(); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index d165fc8..1b2700a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.URI; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -100,6 +101,7 @@ import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; @@ -360,6 +362,7 @@ private static PartitionExpressionProxy createExpressionProxy(Configuration conf } } + /** * Properties specified in hive-default.xml override the properties specified * in jpox.properties. @@ -2137,24 +2140,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin final String defaultPartitionName, final short maxParts, List result, boolean allowSql, boolean allowJdo) throws TException { assert result != null; - - // We will try pushdown first, so make the filter. This will also validate the expression, - // if serialization fails we will throw incompatible metastore error to the client. - String filter = null; - try { - filter = expressionProxy.convertExprToFilter(expr); - } catch (MetaException ex) { - throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); - } - - // Make a tree out of the filter. - // TODO: this is all pretty ugly. The only reason we need all these transformations - // is to maintain support for simple filters for HCat users that query metastore. - // If forcing everyone to use thick client is out of the question, maybe we could - // parse the filter into standard hive expressions and not all this separate tree - // Filter.g stuff. That way this method and ...ByFilter would just be merged. 
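The HiveMetaStoreClient changes above all repeat one idiom: fetch the raw thrift object, then either return it as is when the new hive.metastore.fastpath flag is set (which, per the check at the top of this hunk, is only legal when the metastore runs embedded in the client), or run it through the filter hook and a defensive deep copy as before. A minimal, self-contained sketch of that idiom, not part of the patch and with hypothetical names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FastpathReturnSketch {
  private final boolean fastpath;

  FastpathReturnSketch(boolean fastpath) {
    this.fastpath = fastpath;
  }

  // Mirrors "return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts))":
  // hand the list back untouched on the fast path, otherwise return a copy the caller owns.
  // (The real client performs a thrift deep copy and applies the authorization filter hook.)
  <T> List<T> maybeCopy(List<T> raw) {
    return fastpath ? raw : new ArrayList<T>(raw);
  }

  public static void main(String[] args) {
    List<String> parts = Arrays.asList("ds=2015-10-01", "ds=2015-10-02");
    System.out.println(new FastpathReturnSketch(true).maybeCopy(parts) == parts);   // true: same instance
    System.out.println(new FastpathReturnSketch(false).maybeCopy(parts) == parts);  // false: copied
  }
}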
- final ExpressionTree exprTree = makeExpressionTree(filter); - + final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); result.addAll(new GetListHelper(dbName, tblName, allowSql, allowJdo) { @Override @@ -2194,50 +2180,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin return hasUnknownPartitions.get(); } - private class LikeChecker extends ExpressionTree.TreeVisitor { - private boolean hasLike; - - public boolean hasLike() { - return hasLike; - } - - @Override - protected boolean shouldStop() { - return hasLike; - } - - @Override - protected void visit(LeafNode node) throws MetaException { - hasLike = hasLike || (node.operator == Operator.LIKE); - } - } - /** - * Makes expression tree out of expr. - * @param filter Filter. - * @return Expression tree. Null if there was an error. - */ - private ExpressionTree makeExpressionTree(String filter) throws MetaException { - // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g. - if (filter == null || filter.isEmpty()) { - return ExpressionTree.EMPTY_TREE; - } - LOG.debug("Filter specified is " + filter); - ExpressionTree tree = null; - try { - tree = getFilterParser(filter).tree; - } catch (MetaException ex) { - LOG.info("Unable to make the expression tree from expression string [" - + filter + "]" + ex.getMessage()); // Don't log the stack, this is normal. - } - if (tree == null) { - return null; - } - // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here. - LikeChecker lc = new LikeChecker(); - tree.accept(lc); - return lc.hasLike() ? null : tree; - } /** * Gets the partition names from a table, pruned using an expression. @@ -2573,7 +2516,7 @@ protected String describeResult() { String filter, final short maxParts, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; + ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; return new GetListHelper(dbName, tblName, allowSql, allowJdo) { @Override @@ -2616,24 +2559,6 @@ private Table ensureGetTable( return convertToTable(ensureGetMTable(dbName, tblName)); } - private FilterParser getFilterParser(String filter) throws MetaException { - FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter)); - CommonTokenStream tokens = new CommonTokenStream(lexer); - - FilterParser parser = new FilterParser(tokens); - try { - parser.filter(); - } catch(RecognitionException re) { - throw new MetaException("Error parsing partition filter; lexer error: " - + lexer.errorMsg + "; exception " + re); - } - - if (lexer.errorMsg != null) { - throw new MetaException("Error parsing partition filter : " + lexer.errorMsg); - } - return parser; - } - /** * Makes a JDO query filter string. * Makes a JDO query filter string for tables or partitions. @@ -2647,7 +2572,7 @@ private FilterParser getFilterParser(String filter) throws MetaException { private String makeQueryFilterString(String dbName, MTable mtable, String filter, Map params) throws MetaException { ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; + ? 
PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true); } @@ -3418,7 +3343,7 @@ public boolean removeRole(String roleName) throws MetaException, if (mRol != null) { // first remove all the membership, the membership that this role has // been granted - List roleMap = listRoleMembers(mRol.getRoleName()); + List roleMap = listMRoleMembers(mRol.getRoleName()); if (roleMap.size() > 0) { pm.deletePersistentAll(roleMap); } @@ -3429,7 +3354,7 @@ public boolean removeRole(String roleName) throws MetaException, } queryWrapper.close(); // then remove all the grants - List userGrants = listPrincipalGlobalGrants( + List userGrants = listPrincipalMGlobalGrants( mRol.getRoleName(), PrincipalType.ROLE); if (userGrants.size() > 0) { pm.deletePersistentAll(userGrants); @@ -3489,11 +3414,11 @@ public boolean removeRole(String roleName) throws MetaException, List groupNames) { List ret = new ArrayList(); if(userName != null) { - ret.addAll(listRoles(userName, PrincipalType.USER)); + ret.addAll(listMRoles(userName, PrincipalType.USER)); } if (groupNames != null) { for (String groupName: groupNames) { - ret.addAll(listRoles(groupName, PrincipalType.GROUP)); + ret.addAll(listMRoles(groupName, PrincipalType.GROUP)); } } // get names of these roles and its ancestors @@ -3514,7 +3439,7 @@ private void getAllRoleAncestors(Set processedRoleNames, List if (!processedRoleNames.contains(parentRoleName)) { // unprocessed role: get its parents, add it to processed, and call this // function recursively - List nextParentRoles = listRoles(parentRoleName, PrincipalType.ROLE); + List nextParentRoles = listMRoles(parentRoleName, PrincipalType.ROLE); processedRoleNames.add(parentRoleName); getAllRoleAncestors(processedRoleNames, nextParentRoles); } @@ -3522,8 +3447,8 @@ private void getAllRoleAncestors(Set processedRoleNames, List } @SuppressWarnings("unchecked") - @Override - public List listRoles(String principalName, PrincipalType principalType) { + public List listMRoles(String principalName, + PrincipalType principalType) { boolean success = false; Query query = null; List mRoleMember = new ArrayList(); @@ -3562,6 +3487,44 @@ private void getAllRoleAncestors(Set processedRoleNames, List return mRoleMember; } + @Override + public List listRoles(String principalName, PrincipalType principalType) { + List result = new ArrayList(); + List roleMaps = listMRoles(principalName, principalType); + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + MRole mrole = roleMap.getRole(); + Role role = new Role(mrole.getRoleName(), mrole.getCreateTime(), mrole.getOwnerName()); + result.add(role); + } + } + return result; + } + + @Override + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { + List result = new ArrayList(); + List roleMaps = listMRoles(principalName, principalType); + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( + roleMap.getRole().getRoleName(), + roleMap.getPrincipalName(), + PrincipalType.valueOf(roleMap.getPrincipalType()), + roleMap.getGrantOption(), + roleMap.getAddTime(), + roleMap.getGrantor(), + // no grantor type for public role, hence the null check + roleMap.getGrantorType() == null ? 
null + : PrincipalType.valueOf(roleMap.getGrantorType()) + ); + result.add(rolePrinGrant); + } + } + return result; + } + @SuppressWarnings("unchecked") private List listMSecurityPrincipalMembershipRole(final String roleName, final PrincipalType principalType, @@ -3655,7 +3618,7 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, try { openTransaction(); if (userName != null) { - List user = this.listPrincipalGlobalGrants(userName, PrincipalType.USER); + List user = this.listPrincipalMGlobalGrants(userName, PrincipalType.USER); if(user.size()>0) { Map> userPriv = new HashMap>(); List grantInfos = new ArrayList(user.size()); @@ -3672,7 +3635,8 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, if (groupNames != null && groupNames.size() > 0) { Map> groupPriv = new HashMap>(); for(String groupName: groupNames) { - List group = this.listPrincipalGlobalGrants(groupName, PrincipalType.GROUP); + List group = + this.listPrincipalMGlobalGrants(groupName, PrincipalType.GROUP); if(group.size()>0) { List grantInfos = new ArrayList(group.size()); for (int i = 0; i < group.size(); i++) { @@ -3701,7 +3665,7 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, dbName = HiveStringUtils.normalizeIdentifier(dbName); if (principalName != null) { - List userNameDbPriv = this.listPrincipalDBGrants( + List userNameDbPriv = this.listPrincipalMDBGrants( principalName, principalType, dbName); if (userNameDbPriv != null && userNameDbPriv.size() > 0) { List grantInfos = new ArrayList( @@ -3901,7 +3865,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, if (principalName != null) { List userNameTabPartPriv = this - .listPrincipalPartitionGrants(principalName, principalType, + .listPrincipalMPartitionGrants(principalName, principalType, dbName, tableName, partName); if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { List grantInfos = new ArrayList( @@ -3930,7 +3894,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { if (principalName != null) { List userNameTabPartPriv = this - .listAllTableGrants(principalName, principalType, + .listAllMTableGrants(principalName, principalType, dbName, tableName); if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { List grantInfos = new ArrayList( @@ -3957,7 +3921,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { if (partitionName == null) { List userNameColumnPriv = this - .listPrincipalTableColumnGrants(principalName, principalType, + .listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { List grantInfos = new ArrayList( @@ -3972,7 +3936,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { } } else { List userNameColumnPriv = this - .listPrincipalPartitionColumnGrants(principalName, + .listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, partitionName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { List grantInfos = new ArrayList( @@ -4021,7 +3985,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { List globalPrivs = this - .listPrincipalGlobalGrants(userName, principalType); + .listPrincipalMGlobalGrants(userName, principalType); if (globalPrivs != null) { for (MGlobalPrivilege priv : globalPrivs) { if (priv.getGrantor().equalsIgnoreCase(grantor)) { @@ -4041,7 +4005,7 @@ public 
boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { MDatabase dbObj = getMDatabase(hiveObject.getDbName()); if (dbObj != null) { - List dbPrivs = this.listPrincipalDBGrants( + List dbPrivs = this.listPrincipalMDBGrants( userName, principalType, hiveObject.getDbName()); if (dbPrivs != null) { for (MDBPrivilege priv : dbPrivs) { @@ -4066,7 +4030,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce .getObjectName()); if (tblObj != null) { List tablePrivs = this - .listAllTableGrants(userName, principalType, + .listAllMTableGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName()); if (tablePrivs != null) { for (MTablePrivilege priv : tablePrivs) { @@ -4096,7 +4060,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce if (partObj != null) { partName = partObj.getPartitionName(); List partPrivs = this - .listPrincipalPartitionGrants(userName, + .listPrincipalMPartitionGrants(userName, principalType, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName()); if (partPrivs != null) { @@ -4132,7 +4096,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce if (partObj == null) { continue; } - colPrivs = this.listPrincipalPartitionColumnGrants( + colPrivs = this.listPrincipalMPartitionColumnGrants( userName, principalType, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName(), hiveObject.getColumnName()); @@ -4162,7 +4126,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } else { List colPrivs = null; - colPrivs = this.listPrincipalTableColumnGrants( + colPrivs = this.listPrincipalMTableColumnGrants( userName, principalType, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); @@ -4230,7 +4194,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) PrincipalType principalType = privDef.getPrincipalType(); if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { - List mSecUser = this.listPrincipalGlobalGrants( + List mSecUser = this.listPrincipalMGlobalGrants( userName, principalType); boolean found = false; if (mSecUser != null) { @@ -4263,7 +4227,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) if (dbObj != null) { String db = hiveObject.getDbName(); boolean found = false; - List dbGrants = this.listPrincipalDBGrants( + List dbGrants = this.listPrincipalMDBGrants( userName, principalType, db); for (String privilege : privs) { for (MDBPrivilege dbGrant : dbGrants) { @@ -4292,7 +4256,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { boolean found = false; List tableGrants = this - .listAllTableGrants(userName, principalType, + .listAllMTableGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName()); for (String privilege : privs) { for (MTablePrivilege tabGrant : tableGrants) { @@ -4326,7 +4290,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); } List partitionGrants = this - .listPrincipalPartitionGrants(userName, principalType, + .listPrincipalMPartitionGrants(userName, principalType, hiveObject.getDbName(), hiveObject.getObjectName(), partName); for (String privilege : 
privs) { for (MPartitionPrivilege partGrant : partitionGrants) { @@ -4362,7 +4326,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } if (partName != null) { - List mSecCol = listPrincipalPartitionColumnGrants( + List mSecCol = listPrincipalMPartitionColumnGrants( userName, principalType, hiveObject.getDbName(), hiveObject .getObjectName(), partName, hiveObject.getColumnName()); boolean found = false; @@ -4394,7 +4358,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } } else { - List mSecCol = listPrincipalTableColumnGrants( + List mSecCol = listPrincipalMTableColumnGrants( userName, principalType, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); boolean found = false; @@ -4449,8 +4413,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - @Override - public List listRoleMembers(String roleName) { + public List listMRoleMembers(String roleName) { boolean success = false; Query query = null; List mRoleMemeberList = new ArrayList(); @@ -4479,10 +4442,34 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) return mRoleMemeberList; } - @SuppressWarnings("unchecked") @Override - public List listPrincipalGlobalGrants(String principalName, - PrincipalType principalType) { + public List listRoleMembers(String roleName) { + List roleMaps = listMRoleMembers(roleName); + List rolePrinGrantList = new ArrayList(); + + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( + roleMap.getRole().getRoleName(), + roleMap.getPrincipalName(), + PrincipalType.valueOf(roleMap.getPrincipalType()), + roleMap.getGrantOption(), + roleMap.getAddTime(), + roleMap.getGrantor(), + // no grantor type for public role, hence the null check + roleMap.getGrantorType() == null ? null + : PrincipalType.valueOf(roleMap.getGrantorType()) + ); + rolePrinGrantList.add(rolePrinGrant); + + } + } + return rolePrinGrantList; + } + + @SuppressWarnings("unchecked") + public List listPrincipalMGlobalGrants(String principalName, + PrincipalType principalType) { boolean commited = false; Query query = null; List userNameDbPriv = new ArrayList(); @@ -4512,6 +4499,29 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @Override + public List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { + List mUsers = + listPrincipalMGlobalGrants(principalName, principalType); + if (mUsers.isEmpty()) { + return Collections. 
emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mUsers.size(); i++) { + MGlobalPrivilege sUsr = mUsers.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.GLOBAL, null, null, null, null); + HiveObjectPrivilege secUser = new HiveObjectPrivilege( + objectRef, sUsr.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr + .getCreateTime(), sUsr.getGrantor(), PrincipalType + .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption())); + result.add(secUser); + } + return result; + } + + @Override public List listGlobalGrantsAll() { boolean commited = false; Query query = null; @@ -4548,8 +4558,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - @Override - public List listPrincipalDBGrants(String principalName, + public List listPrincipalMDBGrants(String principalName, PrincipalType principalType, String dbName) { boolean success = false; Query query = null; @@ -4583,6 +4592,29 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @Override + public List listPrincipalDBGrants(String principalName, + PrincipalType principalType, + String dbName) { + List mDbs = listPrincipalMDBGrants(principalName, principalType, dbName); + if (mDbs.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mDbs.size(); i++) { + MDBPrivilege sDB = mDbs.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.DATABASE, dbName, null, null, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sDB.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sDB.getPrivilege(), sDB + .getCreateTime(), sDB.getGrantor(), PrincipalType + .valueOf(sDB.getGrantorType()), sDB.getGrantOption())); + result.add(secObj); + } + return result; + } + + @Override public List listPrincipalDBGrantsAll( String principalName, PrincipalType principalType) { QueryWrapper queryWrapper = new QueryWrapper(); @@ -4905,10 +4937,10 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List(query, params); } - @Override @SuppressWarnings("unchecked") - public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { + public List listAllMTableGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName) { tableName = HiveStringUtils.normalizeIdentifier(tableName); dbName = HiveStringUtils.normalizeIdentifier(dbName); boolean success = false; @@ -4942,10 +4974,35 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String partName) { + public List listAllTableGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName) { + List mTbls = + listAllMTableGrants(principalName, principalType, dbName, tableName); + if (mTbls.isEmpty()) { + return Collections. 
emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mTbls.size(); i++) { + MTablePrivilege sTbl = mTbls.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.TABLE, dbName, tableName, null, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sTbl.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl + .getGrantor(), PrincipalType.valueOf(sTbl + .getGrantorType()), sTbl.getGrantOption())); + result.add(secObj); + } + return result; + } + + @SuppressWarnings("unchecked") + public List listPrincipalMPartitionGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partName) { boolean success = false; Query query = null; tableName = HiveStringUtils.normalizeIdentifier(tableName); @@ -4982,10 +5039,39 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String columnName) { + public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + List partValues, + String partName) { + List mParts = listPrincipalMPartitionGrants(principalName, + principalType, dbName, tableName, partName); + if (mParts.isEmpty()) { + return Collections. emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mParts.size(); i++) { + MPartitionPrivilege sPart = mParts.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.PARTITION, dbName, tableName, partValues, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sPart.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sPart.getPrivilege(), sPart + .getCreateTime(), sPart.getGrantor(), PrincipalType + .valueOf(sPart.getGrantorType()), sPart + .getGrantOption())); + + result.add(secObj); + } + return result; + } + + @SuppressWarnings("unchecked") + public List listPrincipalMTableColumnGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String columnName) { boolean success = false; Query query = null; tableName = HiveStringUtils.normalizeIdentifier(tableName); @@ -5023,10 +5109,36 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + String columnName) { + List mTableCols = + listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); + if (mTableCols.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mTableCols.size(); i++) { + MTableColumnPrivilege sCol = mTableCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, null, sCol.getColumnName()); + HiveObjectPrivilege secObj = new HiveObjectPrivilege( + objectRef, sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol + .getGrantOption())); + result.add(secObj); + } + return result; + } + @SuppressWarnings("unchecked") - public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String partitionName, - String columnName) { + public List 
listPrincipalMPartitionColumnGrants( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; tableName = HiveStringUtils.normalizeIdentifier(tableName); @@ -5064,8 +5176,37 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionColumnGrantsAll(String principalName, - PrincipalType principalType) { + public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + List partValues, + String partitionName, + String columnName) { + List mPartitionCols = + listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, + partitionName, columnName); + if (mPartitionCols.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList(); + for (int i = 0; i < mPartitionCols.size(); i++) { + MPartitionColumnPrivilege sCol = mPartitionCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, partValues, sCol.getColumnName()); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol.getGrantOption())); + result.add(secObj); + } + return result; + } + + @Override + public List listPrincipalPartitionColumnGrantsAll( + String principalName, PrincipalType principalType) { boolean success = false; Query query = null; try { @@ -6403,9 +6544,14 @@ protected String describeResult() { }.run(true); } - private List getMPartitionColumnStatistics(Table table, - List partNames, List colNames, - QueryWrapper queryWrapper) throws NoSuchObjectException, MetaException { + @Override + public void flushCache() { + // NOP as there's no caching + } + + private List getMPartitionColumnStatistics( + Table table, List partNames, List colNames, QueryWrapper queryWrapper) + throws NoSuchObjectException, MetaException { boolean committed = false; try { @@ -7470,4 +7616,14 @@ private NotificationEvent translateDbToThrift(MNotificationLog dbEvent) { event.setMessage((dbEvent.getMessage())); return event; } + + @Override + public ByteBuffer[] getFileMetadata(List fileIds) { + return null; // Not supported for now; callers have to handle this accordingly. + } + + @Override + public void putFileMetadata(List fileIds, List metadata) { + // Not supported for now. + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java new file mode 100644 index 0000000..5766bdd --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.antlr.runtime.CommonTokenStream; +import org.antlr.runtime.RecognitionException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.FilterLexer; +import org.apache.hadoop.hive.metastore.parser.FilterParser; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; + +/** + * Utility functions for working with partition filter expressions + */ +public class PartFilterExprUtil { + private static final Log LOG = LogFactory.getLog(PartFilterExprUtil.class.getName()); + + + public static ExpressionTree makeExpressionTree(PartitionExpressionProxy expressionProxy, + byte[] expr) throws MetaException { + // We will try pushdown first, so make the filter. This will also validate the expression, + // if serialization fails we will throw incompatible metastore error to the client. + String filter = null; + try { + filter = expressionProxy.convertExprToFilter(expr); + } catch (MetaException ex) { + throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); + } + + // Make a tree out of the filter. + // TODO: this is all pretty ugly. The only reason we need all these transformations + // is to maintain support for simple filters for HCat users that query metastore. + // If forcing everyone to use thick client is out of the question, maybe we could + // parse the filter into standard hive expressions and not all this separate tree + // Filter.g stuff. That way this method and ...ByFilter would just be merged. + return PartFilterExprUtil.makeExpressionTree(filter); + } + + + /** + * Creates the proxy used to evaluate expressions. This is here to prevent circular + * dependency - ql -> metastore client <-> metastore server -> ql. If server and + * client are split, this can be removed. + * @param conf Configuration. + * @return The partition expression proxy. + */ + public static PartitionExpressionProxy createExpressionProxy(Configuration conf) { + String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + try { + @SuppressWarnings("unchecked") + Class clazz = + (Class)MetaStoreUtils.getClass(className); + return MetaStoreUtils.newInstance( + clazz, new Class[0], new Object[0]); + } catch (MetaException e) { + LOG.error("Error loading PartitionExpressionProxy", e); + throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); + } + } + + /** + * Makes expression tree out of expr. + * @param filter Filter. + * @return Expression tree. Null if there was an error. 
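As an illustration of the parsing path this class wraps, a sketch that is not part of the patch; the filter string is only an example of the Filter.g syntax:

import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.FilterParser;

public class FilterParseSketch {
  public static void main(String[] args) throws MetaException {
    // getFilterParser (defined further down in this class) throws MetaException on a syntax error.
    FilterParser parser = PartFilterExprUtil.getFilterParser("ds = \"2015-10-01\" and hr = \"10\"");
    ExpressionTree tree = parser.tree;  // later walked by ObjectStore to build a JDO or direct-SQL filter
    System.out.println(tree != null);
  }
}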
+ */ + private static ExpressionTree makeExpressionTree(String filter) throws MetaException { + // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g. + if (filter == null || filter.isEmpty()) { + return ExpressionTree.EMPTY_TREE; + } + LOG.debug("Filter specified is " + filter); + ExpressionTree tree = null; + try { + tree = getFilterParser(filter).tree; + } catch (MetaException ex) { + LOG.info("Unable to make the expression tree from expression string [" + + filter + "]" + ex.getMessage()); // Don't log the stack, this is normal. + } + if (tree == null) { + return null; + } + // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here. + LikeChecker lc = new LikeChecker(); + tree.accept(lc); + return lc.hasLike() ? null : tree; + } + + + private static class LikeChecker extends ExpressionTree.TreeVisitor { + private boolean hasLike; + + public boolean hasLike() { + return hasLike; + } + + @Override + protected boolean shouldStop() { + return hasLike; + } + + @Override + protected void visit(LeafNode node) throws MetaException { + hasLike = hasLike || (node.operator == Operator.LIKE); + } + } + + public static FilterParser getFilterParser(String filter) throws MetaException { + FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter)); + CommonTokenStream tokens = new CommonTokenStream(lexer); + + FilterParser parser = new FilterParser(tokens); + try { + parser.filter(); + } catch(RecognitionException re) { + throw new MetaException("Error parsing partition filter; lexer error: " + + lexer.errorMsg + "; exception " + re); + } + + if (lexer.errorMsg != null) { + throw new MetaException("Error parsing partition filter : " + lexer.errorMsg); + } + return parser; + } + + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 7c85eea..1968256 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -22,14 +22,13 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import java.nio.ByteBuffer; import java.util.List; import java.util.Map; -import java.util.SortedSet; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; @@ -49,19 +48,12 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import 
org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -260,27 +252,27 @@ public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, S public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract List listPrincipalGlobalGrants(String principalName, + public abstract List listPrincipalGlobalGrants(String principalName, PrincipalType principalType); - public abstract List listPrincipalDBGrants(String principalName, + public abstract List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName); - public abstract List listAllTableGrants( + public abstract List listAllTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName); - public abstract List listPrincipalPartitionGrants( + public abstract List listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String dbName, - String tableName, String partName); + String tableName, List partValues, String partName); - public abstract List listPrincipalTableColumnGrants( + public abstract List listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String columnName); - public abstract List listPrincipalPartitionColumnGrants( + public abstract List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, - String tableName, String partName, String columnName); + String tableName, List partValues, String partName, String columnName); public abstract boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; @@ -293,16 +285,19 @@ public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean gran public List listRoleNames(); - public List listRoles(String principalName, + public List listRoles(String principalName, PrincipalType principalType); + public List listRolesWithGrants(String principalName, + PrincipalType principalType); + /** * Get the role to principal grant mapping for given role * @param roleName * @return */ - public List listRoleMembers(String roleName); + public List listRoleMembers(String roleName); public abstract Partition getPartitionWithAuth(String dbName, String tblName, @@ -358,10 +353,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore - * @param partVals - * - * @param ColumnStats object to persist - * @param List of partVals + * @param colStats object to persist * @return Boolean indicating the outcome of the operation * @throws NoSuchObjectException * @throws MetaException @@ -374,8 +366,7 @@ public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) /** Persists the given column statistics object to the metastore * @param partVals * - * @param ColumnStats object to persist - * @param List of partVals + * 
@param statsObj object to persist * @return Boolean indicating the outcome of the operation * @throws NoSuchObjectException * @throws MetaException @@ -390,9 +381,9 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb * Returns the relevant column statistics for a given column in a given table in a given database * if such statistics exist. * - * @param The name of the database, defaults to current database - * @param The name of the table - * @param The name of the column for which statistics is requested + * @param dbName name of the database, defaults to current database + * @param tableName name of the table + * @param colName names of the columns for which statistics is requested * @return Relevant column statistics for the column for the given table * @throws NoSuchObjectException * @throws MetaException @@ -520,7 +511,7 @@ public void createFunction(Function func) /** * Alter function based on new function specs. * @param dbName - * @param name + * @param funcName * @param newFunction * @throws InvalidObjectException * @throws MetaException @@ -531,7 +522,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) /** * Drop a function definition. * @param dbName - * @param functionName + * @param funcName * @return * @throws MetaException * @throws NoSuchObjectException @@ -544,7 +535,7 @@ public void dropFunction(String dbName, String funcName) /** * Retrieve function by name. * @param dbName - * @param functionName + * @param funcName * @return * @throws MetaException */ @@ -596,5 +587,14 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * @return */ public CurrentNotificationEventId getCurrentNotificationEventId(); - + + /* + * Flush any catalog objects held by the metastore implementation. Note that this does not + * flush statistics objects. This should be called at the beginning of each query. 
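A caller-side sketch of that contract, not part of the patch; the hook class and method name are hypothetical:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class QueryStartHook {
  // Called once at the start of each query. ObjectStore implements flushCache() as a no-op;
  // a caching implementation (for instance the HBase-backed store added by this patch) can use
  // it to drop cached catalog objects. Statistics caches are explicitly not flushed here.
  static void onQueryStart(IMetaStoreClient msc) {
    msc.flushCache();
  }
}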
+ */ + public void flushCache(); + + ByteBuffer[] getFileMetadata(List fileIds) throws MetaException; + + void putFileMetadata(List fileIds, List metadata) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java index 5bde45b..c3755ef 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java @@ -27,6 +27,8 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.lang.ClassUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; @@ -38,6 +40,8 @@ @InterfaceStability.Evolving public class RawStoreProxy implements InvocationHandler { + static final private Log LOG = LogFactory.getLog(RawStoreProxy.class.getName()); + private final RawStore base; private final MetaStoreInit.MetaStoreInitData metaStoreInitData = new MetaStoreInit.MetaStoreInitData(); @@ -110,7 +114,6 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl Deadline.startTimer(method.getName()); isTimerStarted = true; } - ret = method.invoke(base, args); if (isTimerStarted) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java new file mode 100644 index 0000000..89c3e7b --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/AggrStatsInvalidatorFilter.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterBase; +import org.apache.hive.common.util.BloomFilter; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +/** + * Filter for scanning aggregates stats table + */ +public class AggrStatsInvalidatorFilter extends FilterBase { + private static final Log LOG = + LogFactory.getLog(AggrStatsInvalidatorFilter.class.getName()); + private final List entries; + private final long runEvery; + private final long maxCacheEntryLife; + // This class is not serializable, so I realize transient doesn't mean anything. It's just to + // comunicate that we don't serialize this and ship it across to the filter on the other end. + // We use the time the filter is actually instantiated in HBase. + private transient long now; + + public static Filter parseFrom(byte[] serialized) throws DeserializationException { + try { + return new AggrStatsInvalidatorFilter( + HbaseMetastoreProto.AggrStatsInvalidatorFilter.parseFrom(serialized)); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + } + + /** + * @param proto Protocol buffer representation of this filter. + */ + AggrStatsInvalidatorFilter(HbaseMetastoreProto.AggrStatsInvalidatorFilter proto) { + this.entries = proto.getToInvalidateList(); + this.runEvery = proto.getRunEvery(); + this.maxCacheEntryLife = proto.getMaxCacheEntryLife(); + now = System.currentTimeMillis(); + } + + @Override + public byte[] toByteArray() throws IOException { + return HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + .addAllToInvalidate(entries) + .setRunEvery(runEvery) + .setMaxCacheEntryLife(maxCacheEntryLife) + .build() + .toByteArray(); + } + + @Override + public boolean filterAllRemaining() throws IOException { + return false; + } + + @Override + public ReturnCode filterKeyValue(Cell cell) throws IOException { + // Is this the partition we want? + if (Arrays.equals(CellUtil.cloneQualifier(cell), HBaseReadWrite.AGGR_STATS_BLOOM_COL)) { + HbaseMetastoreProto.AggrStatsBloomFilter fromCol = + HbaseMetastoreProto.AggrStatsBloomFilter.parseFrom(CellUtil.cloneValue(cell)); + BloomFilter bloom = null; + if (now - maxCacheEntryLife > fromCol.getAggregatedAt()) { + // It's too old, kill it regardless of whether we were asked to or not. + return ReturnCode.INCLUDE; + } else if (now - runEvery * 2 <= fromCol.getAggregatedAt()) { + // It's too new. We might be stomping on something that was just created. Skip it. + return ReturnCode.NEXT_ROW; + } else { + // Look through each of our entries and see if any of them match. 
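      // Illustrative summary, not part of the patch: the checks above and the loop below form a
      // time window. Entries aggregated more than maxCacheEntryLife ago are always INCLUDEd so the
      // invalidator deletes them; entries aggregated within the last two runEvery intervals are
      // skipped with NEXT_ROW so a just-written entry is not stomped on; anything in between is
      // probed against the bloom filter of partition names, and only probable matches are INCLUDEd.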
+ for (HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry : entries) { + // First check if we match on db and table match + if (entry.getDbName().equals(fromCol.getDbName()) && + entry.getTableName().equals(fromCol.getTableName())) { + if (bloom == null) { + // Now, reconstitute the bloom filter and probe it with each of our partition names + bloom = new BloomFilter( + fromCol.getBloomFilter().getBitsList(), + fromCol.getBloomFilter().getNumBits(), + fromCol.getBloomFilter().getNumFuncs()); + } + if (bloom.test(entry.getPartName().toByteArray())) { + // This is most likely a match, so mark it and quit looking. + return ReturnCode.INCLUDE; + } + } + } + } + return ReturnCode.NEXT_ROW; + } else { + return ReturnCode.NEXT_COL; + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java new file mode 100644 index 0000000..2359939 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/Counter.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; + +/** + * A simple metric to count how many times something occurs. + */ +class Counter { + private final String name; + private long cnt; + + Counter(String name) { + this.name = name; + cnt = 0; + } + + void incr() { + cnt++; + } + + void clear() { + cnt = 0; + } + + String dump() { + StringBuilder bldr = new StringBuilder("Dumping metric: "); + bldr.append(name).append(' ').append(cnt); + return bldr.toString(); + } + + @VisibleForTesting long getCnt() { + return cnt; + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java new file mode 100644 index 0000000..696e588 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseConnection.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
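A quick usage sketch for the Counter metric above, not part of the patch; the metric name and call site are hypothetical:

  // Counter is package-private, so this sketch would live in org.apache.hadoop.hive.metastore.hbase.
  static void counterDemo() {
    Counter misses = new Counter("hbase.metastore.cache.misses");
    misses.incr();
    misses.incr();
    System.out.println(misses.dump());  // prints: Dumping metric: hbase.metastore.cache.misses 2
    misses.clear();                     // reset, e.g. after the metric has been reported
  }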
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hbase.client.HTableInterface; + +import java.io.IOException; +import java.util.List; + +/** + * A connection to HBase. Separated out as an interface so we can slide different transaction + * managers between our code and HBase. + */ +public interface HBaseConnection extends Configurable { + + /** + * Connects to HBase. This must be called after {@link #setConf} has been called. + * @throws IOException + */ + void connect() throws IOException; + + /** + * Close the connection. No further operations are possible after this is done. + * @throws IOException + */ + void close() throws IOException; + + /** + * Begin a transaction. + * @throws IOException + */ + void beginTransaction() throws IOException; + + /** + * Commit a transaction + * @throws IOException indicates the commit has failed + */ + void commitTransaction() throws IOException; + + /** + * Rollback a transaction + * @throws IOException + */ + void rollbackTransaction() throws IOException; + + /** + * Flush commits. A no-op for transaction implementations since they will write at commit time. + * @param htab Table to flush + * @throws IOException + */ + void flush(HTableInterface htab) throws IOException; + + /** + * Create a new table + * @param tableName name of the table + * @param columnFamilies name of the column families in the table + * @throws IOException + */ + void createHBaseTable(String tableName, List columnFamilies) throws IOException; + + /** + * Fetch an existing HBase table. + * @param tableName name of the table + * @return table handle + * @throws IOException + */ + HTableInterface getHBaseTable(String tableName) throws IOException; + + /** + * Fetch an existing HBase table and force a connection to it. This should be used only in + * cases where you want to assure that the table exists (ie at install). + * @param tableName name of the table + * @param force if true, force a connection by fetching a non-existant key + * @return table handle + * @throws IOException + */ + HTableInterface getHBaseTable(String tableName, boolean force) throws IOException; + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java new file mode 100644 index 0000000..9762309 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseFilterPlanUtil.java @@ -0,0 +1,612 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
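The interface above is easiest to read as a lifecycle; a sketch, not part of the patch, assuming some already-chosen HBaseConnection implementation and an illustrative table name:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hive.metastore.hbase.HBaseConnection;

public class ConnectionLifecycleSketch {
  static void useConnection(HBaseConnection conn, Configuration conf) throws IOException {
    conn.setConf(conf);                                        // configure before connecting
    conn.connect();
    try {
      HTableInterface htab = conn.getHBaseTable("HBMS_TBLS");  // table name is illustrative only
      // ... issue gets/puts against htab here ...
      conn.flush(htab);  // no-op for transactional implementations, which write at commit time
    } finally {
      conn.close();      // no further operations are possible afterwards
    }
  }
}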
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; + + +/** + * Utility function for generating hbase partition filtering plan representation + * from ExpressionTree. + * Optimizations to be done - + * - Case where all partition keys are specified. Should use a get + * + * {@link PartitionFilterGenerator} is a visitor on the given filter expression tree. After + * walking it it produces the HBase execution plan represented by {@link FilterPlan}. See + * their javadocs for more details. + */ +class HBaseFilterPlanUtil { + + /** + * Compare two byte arrays. + * + * @param ar1 + * first byte array + * @param ar2 + * second byte array + * @return -1 if ar1 < ar2, 0 if == , 1 if > + */ + static int compare(byte[] ar1, byte[] ar2) { + // null check is not needed, nulls are not passed here + for (int i = 0; i < ar1.length; i++) { + if (i == ar2.length) { + return 1; + } else { + if (ar1[i] == ar2[i]) { + continue; + } else if (ar1[i] > ar2[i]) { + return 1; + } else { + return -1; + } + } + } + // ar2 equal until length of ar1. + if(ar1.length == ar2.length) { + return 0; + } + // ar2 has more bytes + return -1; + } + + /** + * Represents the execution plan for hbase to find the set of partitions that + * match given filter expression. + * If you have an AND or OR of two expressions, you can determine FilterPlan for each + * children and then call lhs.and(rhs) or lhs.or(rhs) respectively + * to generate a new plan for the expression. + * + * The execution plan has one or more ScanPlan objects. To get the results the set union of all + * ScanPlan objects needs to be done. + */ + public static abstract class FilterPlan { + abstract FilterPlan and(FilterPlan other); + abstract FilterPlan or(FilterPlan other); + abstract List getPlans(); + @Override + public String toString() { + return getPlans().toString(); + } + + } + + /** + * Represents a union/OR of single scan plans (ScanPlan). 
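As a quick illustration of the and()/or() composition described above, the following sketch (not part of the patch; the class and method names are invented) shows how (p1 OR p2) AND p3 collapses into a union of two ScanPlans:

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.FilterPlan;
import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;

// Illustrative only; must live in this package because HBaseFilterPlanUtil is package-private.
class FilterPlanDnfSketch {
  static void show() {
    FilterPlan p1 = new ScanPlan();
    FilterPlan p2 = new ScanPlan();
    FilterPlan p3 = new ScanPlan();
    // (p1 OR p2) AND p3  ==>  (p1 AND p3) OR (p2 AND p3)
    FilterPlan dnf = p1.or(p2).and(p3);
    System.out.println("ScanPlans in DNF: " + dnf.getPlans().size()); // prints 2
  }
}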
+ */ + public static class MultiScanPlan extends FilterPlan { + final ImmutableList scanPlans; + + public MultiScanPlan(List scanPlans){ + this.scanPlans = ImmutableList.copyOf(scanPlans); + } + + @Override + public FilterPlan and(FilterPlan other) { + // Convert to disjunctive normal form (DNF), ie OR of ANDs + // First get a new set of FilterPlans by doing an AND + // on each ScanPlan in this one with the other FilterPlan + List newFPlans = new ArrayList(); + for (ScanPlan splan : getPlans()) { + newFPlans.add(splan.and(other)); + } + //now combine scanPlans in multiple new FilterPlans into one + // MultiScanPlan + List newScanPlans = new ArrayList(); + for (FilterPlan fp : newFPlans) { + newScanPlans.addAll(fp.getPlans()); + } + return new MultiScanPlan(newScanPlans); + } + + @Override + public FilterPlan or(FilterPlan other) { + // just combine the ScanPlans + List newScanPlans = new ArrayList(this.getPlans()); + newScanPlans.addAll(other.getPlans()); + return new MultiScanPlan(newScanPlans); + } + + @Override + public List getPlans() { + return scanPlans; + } + } + + /** + * Represents a single Hbase Scan api call + */ + public static class ScanPlan extends FilterPlan { + + public static class ScanMarker { + final String value; + /** + * If inclusive = true, it means that the + * marker includes those bytes. + * If it is false, it means the marker starts at the next possible byte array + * or ends at the next possible byte array + */ + final boolean isInclusive; + final String type; + ScanMarker(String obj, boolean i, String type){ + this.value = obj; + this.isInclusive = i; + this.type = type; + } + @Override + public String toString() { + return "ScanMarker [" + "value=" + value.toString() + ", isInclusive=" + isInclusive + + ", type=" + type + "]"; + } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + value.hashCode(); + result = prime * result + (isInclusive ? 1231 : 1237); + result = prime * result + type.hashCode(); + return result; + } + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + ScanMarker other = (ScanMarker) obj; + if (!value.equals(other.value)) + return false; + if (isInclusive != other.isInclusive) + return false; + if (type != other.type) + return false; + return true; + } + } + public static class ScanMarkerPair { + public ScanMarkerPair(ScanMarker startMarker, ScanMarker endMarker) { + this.startMarker = startMarker; + this.endMarker = endMarker; + } + ScanMarker startMarker; + ScanMarker endMarker; + } + // represent Scan start, partition key name -> scanMarkerPair + Map markers = new HashMap(); + List ops = new ArrayList(); + + // Get the number of partition key prefixes which can be used in the scan range. + // For example, if partition key is (year, month, state) + // 1. year = 2015 and month >= 1 and month < 5 + // year + month can be used in scan range, majorParts = 2 + // 2. year = 2015 and state = 'CA' + // only year can be used in scan range, majorParts = 1 + // 3. 
month = 10 and state = 'CA' + // nothing can be used in scan range, majorParts = 0 + private int getMajorPartsCount(List parts) { + int majorPartsCount = 0; + while (majorPartsCount parts) { + int majorPartsCount = getMajorPartsCount(parts); + Set majorKeys = new HashSet(); + for (int i=0;i names = HBaseUtils.getPartitionNames(parts); + List ranges = new ArrayList(); + for (Map.Entry entry : markers.entrySet()) { + if (names.contains(entry.getKey()) && !majorKeys.contains(entry.getKey())) { + PartitionKeyComparator.Mark startMark = null; + if (entry.getValue().startMarker != null) { + startMark = new PartitionKeyComparator.Mark(entry.getValue().startMarker.value, + entry.getValue().startMarker.isInclusive); + } + PartitionKeyComparator.Mark endMark = null; + if (entry.getValue().endMarker != null) { + startMark = new PartitionKeyComparator.Mark(entry.getValue().endMarker.value, + entry.getValue().endMarker.isInclusive); + } + PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( + entry.getKey(), startMark, endMark); + ranges.add(range); + } + } + + if (ranges.isEmpty() && ops.isEmpty()) { + return null; + } else { + return new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator( + StringUtils.join(names, ","), StringUtils.join(HBaseUtils.getPartitionKeyTypes(parts), ","), + ranges, ops)); + } + } + + public void setStartMarker(String keyName, String keyType, String start, boolean isInclusive) { + if (markers.containsKey(keyName)) { + markers.get(keyName).startMarker = new ScanMarker(start, isInclusive, keyType); + } else { + ScanMarkerPair marker = new ScanMarkerPair(new ScanMarker(start, isInclusive, keyType), null); + markers.put(keyName, marker); + } + } + + public ScanMarker getStartMarker(String keyName) { + if (markers.containsKey(keyName)) { + return markers.get(keyName).startMarker; + } else { + return null; + } + } + + public void setEndMarker(String keyName, String keyType, String end, boolean isInclusive) { + if (markers.containsKey(keyName)) { + markers.get(keyName).endMarker = new ScanMarker(end, isInclusive, keyType); + } else { + ScanMarkerPair marker = new ScanMarkerPair(null, new ScanMarker(end, isInclusive, keyType)); + markers.put(keyName, marker); + } + } + + public ScanMarker getEndMarker(String keyName) { + if (markers.containsKey(keyName)) { + return markers.get(keyName).endMarker; + } else { + return null; + } + } + + @Override + public FilterPlan and(FilterPlan other) { + List newSPlans = new ArrayList(); + for (ScanPlan otherSPlan : other.getPlans()) { + newSPlans.add(this.and(otherSPlan)); + } + return new MultiScanPlan(newSPlans); + } + + private ScanPlan and(ScanPlan other) { + // create combined FilterPlan based on existing lhs and rhs plan + ScanPlan newPlan = new ScanPlan(); + newPlan.markers.putAll(markers); + + for (String keyName : other.markers.keySet()) { + if (newPlan.markers.containsKey(keyName)) { + // create new scan start + ScanMarker greaterStartMarker = getComparedMarker(this.getStartMarker(keyName), + other.getStartMarker(keyName), true); + if (greaterStartMarker != null) { + newPlan.setStartMarker(keyName, greaterStartMarker.type, greaterStartMarker.value, greaterStartMarker.isInclusive); + } + + // create new scan end + ScanMarker lesserEndMarker = getComparedMarker(this.getEndMarker(keyName), other.getEndMarker(keyName), + false); + if (lesserEndMarker != null) { + newPlan.setEndMarker(keyName, lesserEndMarker.type, lesserEndMarker.value, lesserEndMarker.isInclusive); + } + } else { + 
newPlan.markers.put(keyName, other.markers.get(keyName)); + } + } + + newPlan.ops.addAll(ops); + newPlan.ops.addAll(other.ops); + return newPlan; + } + + /** + * @param lStartMarker + * @param rStartMarker + * @param getGreater if true return greater startmarker, else return smaller one + * @return greater/lesser marker depending on value of getGreater + */ + @VisibleForTesting + static ScanMarker getComparedMarker(ScanMarker lStartMarker, ScanMarker rStartMarker, + boolean getGreater) { + // if one of them has null bytes, just return other + if(lStartMarker == null) { + return rStartMarker; + } else if (rStartMarker == null) { + return lStartMarker; + } + TypeInfo expectedType = + TypeInfoUtils.getTypeInfoFromTypeString(lStartMarker.type); + ObjectInspector outputOI = + TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType); + Converter lConverter = ObjectInspectorConverters.getConverter( + PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); + Converter rConverter = ObjectInspectorConverters.getConverter( + PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI); + Comparable lValue = (Comparable)lConverter.convert(lStartMarker.value); + Comparable rValue = (Comparable)rConverter.convert(rStartMarker.value); + + int compareRes = lValue.compareTo(rValue); + if (compareRes == 0) { + // bytes are equal, now compare the isInclusive flags + if (lStartMarker.isInclusive == rStartMarker.isInclusive) { + // actually equal, so return any one + return lStartMarker; + } + boolean isInclusive = true; + // one that does not include the current bytes is greater + if (getGreater) { + isInclusive = false; + } + // else + return new ScanMarker(lStartMarker.value, isInclusive, lStartMarker.type); + } + if (getGreater) { + return compareRes == 1 ? lStartMarker : rStartMarker; + } + // else + return compareRes == -1 ? lStartMarker : rStartMarker; + } + + + @Override + public FilterPlan or(FilterPlan other) { + List plans = new ArrayList(getPlans()); + plans.addAll(other.getPlans()); + return new MultiScanPlan(plans); + } + + @Override + public List getPlans() { + return Arrays.asList(this); + } + + + /** + * @return row suffix - This is appended to db + table, to generate start row for the Scan + */ + public byte[] getStartRowSuffix(String dbName, String tableName, List parts) { + int majorPartsCount = getMajorPartsCount(parts); + List majorPartTypes = new ArrayList(); + List components = new ArrayList(); + boolean endPrefix = false; + for (int i=0;i parts) { + int majorPartsCount = getMajorPartsCount(parts); + List majorPartTypes = new ArrayList(); + List components = new ArrayList(); + boolean endPrefix = false; + for (int i=0;i entry : markers.entrySet()) { + sb.append("key=" + entry.getKey() + "[startMarker=" + entry.getValue().startMarker + + ", endMarker=" + entry.getValue().endMarker + "]"); + } + return sb.toString(); + } + + } + + /** + * Visitor for ExpressionTree. + * It first generates the ScanPlan for the leaf nodes. The higher level nodes are + * either AND or OR operations. It then calls FilterPlan.and and FilterPlan.or with + * the child nodes to generate the plans for higher level nodes. 
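Before the visitor itself, a small sketch of the markers that individual leaf conditions translate into, which is what visit(LeafNode) below produces. This is not part of the patch; the partition column name "ds", its type, and the class name are invented for illustration.

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;

// Illustrative only; mirrors what visit(LeafNode) does for range operators.
class LeafMarkerSketch {
  static ScanPlan dsRangeForJanuary() {
    ScanPlan plan = new ScanPlan();
    // ds >= '2015-01-01'  -> inclusive start marker on key "ds"
    plan.setStartMarker("ds", "string", "2015-01-01", true);
    // ds < '2015-02-01'   -> exclusive end marker (in the real visitor this would come from a
    // second leaf, and the two ScanPlans would be merged by FilterPlan.and())
    plan.setEndMarker("ds", "string", "2015-02-01", false);
    return plan;
  }
}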
+ */ + @VisibleForTesting + static class PartitionFilterGenerator extends TreeVisitor { + private FilterPlan curPlan; + + // this tells us if there is a condition that did not get included in the plan + // such condition would be treated as getting evaluated to TRUE + private boolean hasUnsupportedCondition = false; + + //Need to cache the left plans for the TreeNode. Use IdentityHashMap here + // as we don't want to dedupe on two TreeNode that are otherwise considered equal + Map leftPlans = new IdentityHashMap(); + + // temporary params for current left and right side plans, for AND, OR + private FilterPlan rPlan; + + private Map nameToType = new HashMap(); + + public PartitionFilterGenerator(List parts) { + for (FieldSchema part : parts) { + nameToType.put(part.getName(), part.getType()); + } + } + + FilterPlan getPlan() { + return curPlan; + } + + @Override + protected void beginTreeNode(TreeNode node) throws MetaException { + // reset the params + curPlan = rPlan = null; + } + + @Override + protected void midTreeNode(TreeNode node) throws MetaException { + leftPlans.put(node, curPlan); + curPlan = null; + } + + @Override + protected void endTreeNode(TreeNode node) throws MetaException { + rPlan = curPlan; + FilterPlan lPlan = leftPlans.get(node); + leftPlans.remove(node); + + switch (node.getAndOr()) { + case AND: + curPlan = lPlan.and(rPlan); + break; + case OR: + curPlan = lPlan.or(rPlan); + break; + default: + throw new AssertionError("Unexpected logical operation " + node.getAndOr()); + } + + } + + + @Override + public void visit(LeafNode node) throws MetaException { + ScanPlan leafPlan = new ScanPlan(); + curPlan = leafPlan; + + // this is a condition on first partition column, so might influence the + // start and end of the scan + final boolean INCLUSIVE = true; + switch (node.operator) { + case EQUALS: + leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); + leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); + break; + case GREATERTHAN: + leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE); + break; + case GREATERTHANOREQUALTO: + leafPlan.setStartMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); + break; + case LESSTHAN: + leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), !INCLUSIVE); + break; + case LESSTHANOREQUALTO: + leafPlan.setEndMarker(node.keyName, nameToType.get(node.keyName), node.value.toString(), INCLUSIVE); + break; + case LIKE: + leafPlan.ops.add(new Operator(Operator.Type.LIKE, node.keyName, node.value.toString())); + break; + case NOTEQUALS: + case NOTEQUALS2: + leafPlan.ops.add(new Operator(Operator.Type.NOTEQUALS, node.keyName, node.value.toString())); + break; + } + } + + private boolean hasUnsupportedCondition() { + return hasUnsupportedCondition; + } + + } + + public static class PlanResult { + public final FilterPlan plan; + public final boolean hasUnsupportedCondition; + PlanResult(FilterPlan plan, boolean hasUnsupportedCondition) { + this.plan = plan; + this.hasUnsupportedCondition = hasUnsupportedCondition; + } + } + + public static PlanResult getFilterPlan(ExpressionTree exprTree, List parts) throws MetaException { + if (exprTree == null) { + // TODO: if exprTree is null, we should do what ObjectStore does. 
See HIVE-10102 + return new PlanResult(new ScanPlan(), true); + } + PartitionFilterGenerator pGenerator = new PartitionFilterGenerator(parts); + exprTree.accept(pGenerator); + return new PlanResult(pGenerator.getPlan(), pGenerator.hasUnsupportedCondition()); + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java new file mode 100644 index 0000000..fac8e90 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java @@ -0,0 +1,535 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.Deadline; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.Table; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + +/** + * A tool to take the contents of an RDBMS based Hive metastore and import it into an HBase based + * one. To use this the config files for Hive configured to work with the RDBMS (that is, + * including the JDBC string, etc.) as well as HBase configuration files must be in the path. + * There should not be a hive-site.xml that specifies HBaseStore in the path. This tool will then + * handle connecting to the RDBMS via the {@link org.apache.hadoop.hive.metastore.ObjectStore} + * and HBase via {@link org.apache.hadoop.hive.metastore.hbase.HBaseStore} and transferring the + * data. 
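To make the above concrete, a couple of representative invocations are sketched below. This is not part of the patch: the database, table, and function names are invented, and in practice the class is launched through the hbaseimport wrapper scripts rather than by calling main() directly. The flags correspond to the options parsed in init() further down.

package org.apache.hadoop.hive.metastore.hbase;

// Illustrative only.
class HBaseImportInvocationSketch {
  static void examples() {
    // Import the entire metastore, copying tables and partitions with 4 threads per stage.
    HBaseImport.main(new String[] {"-a", "-p", "4"});
    // Import one database plus a specific table and function from another database.
    HBaseImport.main(new String[] {"-d", "salesdb", "-t", "default.web_logs", "-f", "default.my_udf"});
  }
}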
+ * + * This tool can import an entire metastore or only selected objects. When selecting objects it + * is necessary to fully specify the object's name. For example, if you want to import the table + * T in the default database it needs to be identified as default.T. The same is true for + * functions. When an object is specified, everything under that object will be imported (e.g. + * if you select database D, then all tables and functions in that database will be + * imported as well). + * + * At this point only tables and partitions are handled in parallel as it is assumed there are + * relatively few of everything else. + * + * Note that HBaseSchemaTool must have already been used to create the appropriate tables in HBase. + */ +public class HBaseImport { + + static final private Log LOG = LogFactory.getLog(HBaseImport.class.getName()); + + public static int main(String[] args) { + try { + HBaseImport tool = new HBaseImport(); + int rv = tool.init(args); + if (rv != 0) return rv; + tool.run(); + } catch (Exception e) { + System.err.println("Caught exception " + e.getClass().getName() + " with message <" + + e.getMessage() + ">"); + return 1; + } + return 0; + } + + private ThreadLocal rdbmsStore = new ThreadLocal() { + @Override + protected RawStore initialValue() { + if (rdbmsConf == null) { + throw new RuntimeException("order violation, need to set rdbms conf first"); + } + RawStore os = new ObjectStore(); + os.setConf(rdbmsConf); + return os; + } + }; + + private ThreadLocal hbaseStore = new ThreadLocal() { + @Override + protected RawStore initialValue() { + if (hbaseConf == null) { + throw new RuntimeException("order violation, need to set hbase conf first"); + } + RawStore hs = new HBaseStore(); + hs.setConf(hbaseConf); + return hs; + } + }; + + private Configuration rdbmsConf; + private Configuration hbaseConf; + private List dbs; + private BlockingQueue
partitionedTables; + private BlockingQueue tableNameQueue; + private BlockingQueue partQueue; + private boolean writingToQueue, readersFinished; + private boolean doKerberos, doAll; + private List rolesToImport, dbsToImport, tablesToImport, functionsToImport; + private int parallel; + private int batchSize; + + private HBaseImport() {} + + @VisibleForTesting + public HBaseImport(String... args) throws ParseException { + init(args); + } + + private int init(String... args) throws ParseException { + Options options = new Options(); + + doAll = doKerberos = false; + parallel = 1; + batchSize = 1000; + + options.addOption(OptionBuilder + .withLongOpt("all") + .withDescription("Import the full metastore") + .create('a')); + + options.addOption(OptionBuilder + .withLongOpt("batchsize") + .withDescription("Number of partitions to read and write in a batch, defaults to 1000") + .hasArg() + .create('b')); + + options.addOption(OptionBuilder + .withLongOpt("database") + .withDescription("Import a single database") + .hasArgs() + .create('d')); + + options.addOption(OptionBuilder + .withLongOpt("help") + .withDescription("You're looking at it") + .create('h')); + + options.addOption(OptionBuilder + .withLongOpt("function") + .withDescription("Import a single function") + .hasArgs() + .create('f')); + + options.addOption(OptionBuilder + .withLongOpt("kerberos") + .withDescription("Import all kerberos related objects (master key, tokens)") + .create('k')); + + options.addOption(OptionBuilder + .withLongOpt("parallel") + .withDescription("Parallel factor for loading (only applied to tables and partitions), " + + "defaults to 1") + .hasArg() + .create('p')); + + options.addOption(OptionBuilder + .withLongOpt("role") + .withDescription("Import a single role") + .hasArgs() + .create('r')); + + options.addOption(OptionBuilder + .withLongOpt("tables") + .withDescription("Import a single tables") + .hasArgs() + .create('t')); + + CommandLine cli = new GnuParser().parse(options, args); + + // Process help, if it was asked for, this must be done first + if (cli.hasOption('h')) { + printHelp(options); + return 1; + } + + boolean hasCmd = false; + // Now process the other command line args + if (cli.hasOption('a')) { + hasCmd = true; + doAll = true; + } + if (cli.hasOption('b')) { + batchSize = Integer.valueOf(cli.getOptionValue('b')); + } + if (cli.hasOption('d')) { + hasCmd = true; + dbsToImport = Arrays.asList(cli.getOptionValues('d')); + } + if (cli.hasOption('f')) { + hasCmd = true; + functionsToImport = Arrays.asList(cli.getOptionValues('f')); + } + if (cli.hasOption('p')) { + parallel = Integer.valueOf(cli.getOptionValue('p')); + } + if (cli.hasOption('r')) { + hasCmd = true; + rolesToImport = Arrays.asList(cli.getOptionValues('r')); + } + if (cli.hasOption('k')) { + doKerberos = true; + } + if (cli.hasOption('t')) { + hasCmd = true; + tablesToImport = Arrays.asList(cli.getOptionValues('t')); + } + if (!hasCmd) { + printHelp(options); + return 1; + } + + dbs = new ArrayList<>(); + // We don't want to bound the size of the table queue because we keep it all in memory + partitionedTables = new LinkedBlockingQueue<>(); + tableNameQueue = new LinkedBlockingQueue<>(); + + // Bound the size of this queue so we don't get too much in memory. 
+ partQueue = new ArrayBlockingQueue<>(parallel * 2); + return 0; + } + + private void printHelp(Options options) { + (new HelpFormatter()).printHelp("hbaseimport", options); + } + + @VisibleForTesting + void run() throws MetaException, InstantiationException, IllegalAccessException, + NoSuchObjectException, InvalidObjectException, InterruptedException { + // Order here is crucial, as you can't add tables until you've added databases, etc. + init(); + if (doAll || rolesToImport != null) { + copyRoles(); + } + if (doAll || dbsToImport != null) { + copyDbs(); + } + if (doAll || dbsToImport != null || tablesToImport != null) { + copyTables(); + copyPartitions(); + } + if (doAll || dbsToImport != null || functionsToImport != null) { + copyFunctions(); + } + if (doAll || doKerberos) { + copyKerberos(); + } + } + + private void init() throws MetaException, IllegalAccessException, InstantiationException { + if (rdbmsConf != null) { + // We've been configured for testing, so don't do anything here. + return; + } + // We're depending on having everything properly in the path + rdbmsConf = new HiveConf(); + hbaseConf = new HiveConf(); + HiveConf.setVar(hbaseConf, HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, + HBaseStore.class.getName()); + HiveConf.setBoolVar(hbaseConf, HiveConf.ConfVars.METASTORE_FASTPATH, true); + + // First get a connection to the RDBMS based store + rdbmsStore.get().setConf(rdbmsConf); + + // Get a connection to the HBase based store + hbaseStore.get().setConf(hbaseConf); + } + + private void copyRoles() throws NoSuchObjectException, InvalidObjectException, MetaException { + screen("Copying roles"); + List<String> toCopy = doAll ? rdbmsStore.get().listRoleNames() : rolesToImport; + for (String roleName : toCopy) { + Role role = rdbmsStore.get().getRole(roleName); + screen("Copying role " + roleName); + hbaseStore.get().addRole(roleName, role.getOwnerName()); + } + } + + private void copyDbs() throws MetaException, NoSuchObjectException, InvalidObjectException { + screen("Copying databases"); + List<String> toCopy = doAll ? rdbmsStore.get().getAllDatabases() : dbsToImport; + for (String dbName : toCopy) { + Database db = rdbmsStore.get().getDatabase(dbName); + dbs.add(db); + screen("Copying database " + dbName); + hbaseStore.get().createDatabase(db); + } + } + + private void copyTables() throws MetaException, InvalidObjectException, InterruptedException { + screen("Copying tables"); + + // Start the parallel threads that will copy the tables + Thread[] copiers = new Thread[parallel]; + writingToQueue = true; + for (int i = 0; i < parallel; i++) { + copiers[i] = new TableCopier(); + copiers[i].start(); + } + + // Put tables from the databases we copied into the queue + for (Database db : dbs) { + screen("Copying tables in database " + db.getName()); + for (String tableName : rdbmsStore.get().getAllTables(db.getName())) { + tableNameQueue.put(new String[]{db.getName(), tableName}); + } + } + + // Now put any specifically requested tables into the queue + if (tablesToImport != null) { + for (String compoundTableName : tablesToImport) { + String[] tn = compoundTableName.split("\\."); + if (tn.length != 2) { + error(compoundTableName + " not in proper form. Must be in form dbname.tablename. 
" + + "Ignoring this table and continuing."); + } else { + tableNameQueue.put(new String[]{tn[0], tn[1]}); + } + } + } + writingToQueue = false; + + // Wait until we've finished adding all the tables + for (Thread copier : copiers) copier.join(); + } + + private class TableCopier extends Thread { + @Override + public void run() { + while (writingToQueue || tableNameQueue.size() > 0) { + try { + String[] name = tableNameQueue.poll(1, TimeUnit.SECONDS); + if (name != null) { + Table table = rdbmsStore.get().getTable(name[0], name[1]); + // If this has partitions, put it in the list to fetch partions for + if (table.getPartitionKeys() != null && table.getPartitionKeys().size() > 0) { + partitionedTables.put(table); + } + screen("Copying table " + name[0] + "." + name[1]); + hbaseStore.get().createTable(table); + } + } catch (InterruptedException | MetaException | InvalidObjectException e) { + throw new RuntimeException(e); + } + } + } + } + + /* Partition copying is a little complex. As we went through and copied the tables we put each + * partitioned table into a queue. We will now go through that queue and add partitions for the + * tables. We do the finding of partitions and writing of them separately and in parallel. + * This way if there is one table with >> partitions then all of the others that skew won't + * hurt us. To avoid pulling all of the partitions for a table into memory, we batch up + * partitions (by default in batches of 1000) and copy them over in batches. + */ + private void copyPartitions() throws MetaException, NoSuchObjectException, + InvalidObjectException, InterruptedException { + screen("Copying partitions"); + readersFinished = false; + Thread[] readers = new Thread[parallel]; + Thread[] writers = new Thread[parallel]; + for (int i = 0; i < parallel; i++) { + readers[i] = new PartitionReader(); + readers[i].start(); + writers[i] = new PartitionWriter(); + writers[i].start(); + } + + for (Thread reader : readers) reader.join(); + readersFinished = true; + + // Wait until we've finished adding all the partitions + for (Thread writer : writers) writer.join(); + } + + private class PartitionReader extends Thread { + @Override + public void run() { + while (partitionedTables.size() > 0) { + try { + Table table = partitionedTables.poll(1, TimeUnit.SECONDS); + if (table != null) { + screen("Fetching partitions for table " + table.getDbName() + "." + + table.getTableName()); + List partNames = + rdbmsStore.get().listPartitionNames(table.getDbName(), table.getTableName(), + (short) -1); + if (partNames.size() <= batchSize) { + LOG.debug("Adding all partition names to queue for " + table.getDbName() + "." + + table.getTableName()); + partQueue.put(new PartQueueEntry(table.getDbName(), table.getTableName(), partNames)); + } else { + int goUntil = partNames.size() % batchSize == 0 ? partNames.size() / batchSize : + partNames.size() / batchSize + 1; + for (int i = 0; i < goUntil; i++) { + int start = i * batchSize; + int end = Math.min((i + 1) * batchSize, partNames.size()); + LOG.debug("Adding partitions " + start + " to " + end + " for " + table.getDbName() + + "." 
+ table.getTableName()); + partQueue.put(new PartQueueEntry(table.getDbName(), table.getTableName(), + partNames.subList(start, end))); + } + } + } + } catch (InterruptedException | MetaException e) { + throw new RuntimeException(e); + } + } + } + } + + private class PartitionWriter extends Thread { + @Override + public void run() { + // This keeps us from throwing exceptions in our raw store calls + Deadline.registerIfNot(1000000); + while (!readersFinished || partQueue.size() > 0) { + try { + PartQueueEntry entry = partQueue.poll(1, TimeUnit.SECONDS); + if (entry != null) { + LOG.info("Writing partitions " + entry.dbName + "." + entry.tableName + "." + + StringUtils.join(entry.partNames, ':')); + // Fetch these partitions and write them to HBase + Deadline.startTimer("hbaseimport"); + List parts = + rdbmsStore.get().getPartitionsByNames(entry.dbName, entry.tableName, + entry.partNames); + hbaseStore.get().addPartitions(entry.dbName, entry.tableName, parts); + Deadline.stopTimer(); + } + } catch (InterruptedException | MetaException | InvalidObjectException | + NoSuchObjectException e) { + throw new RuntimeException(e); + } + } + } + } + + private void copyFunctions() throws MetaException, NoSuchObjectException, InvalidObjectException { + screen("Copying functions"); + // Copy any functions from databases we copied. + for (Database db : dbs) { + screen("Copying functions in database " + db.getName()); + for (String funcName : rdbmsStore.get().getFunctions(db.getName(), "*")) { + copyOneFunction(db.getName(), funcName); + } + } + // Now do any specifically requested functions + if (functionsToImport != null) { + for (String compoundFuncName : functionsToImport) { + String[] fn = compoundFuncName.split("\\."); + if (fn.length != 2) { + error(compoundFuncName + " not in proper form. Must be in form dbname.funcname. " + + "Ignoring this function and continuing."); + } else { + copyOneFunction(fn[0], fn[1]); + } + } + } + } + + private void copyOneFunction(String dbName, String funcName) throws MetaException, + InvalidObjectException { + Function func = rdbmsStore.get().getFunction(dbName, funcName); + screen("Copying function " + dbName + "." 
+ funcName); + hbaseStore.get().createFunction(func); + } + + private void copyKerberos() throws MetaException { + screen("Copying kerberos related items"); + for (String tokenId : rdbmsStore.get().getAllTokenIdentifiers()) { + String token = rdbmsStore.get().getToken(tokenId); + hbaseStore.get().addToken(tokenId, token); + } + for (String masterKey : rdbmsStore.get().getMasterKeys()) { + hbaseStore.get().addMasterKey(masterKey); + } + } + + private void screen(String msg) { + LOG.info(msg); + System.out.println(msg); + } + + private void error(String msg) { + LOG.error(msg); + System.err.println("ERROR: " + msg); + } + + @VisibleForTesting + void setConnections(RawStore rdbms, RawStore hbase) { + rdbmsStore.set(rdbms); + hbaseStore.set(hbase); + rdbmsConf = rdbms.getConf(); + hbaseConf = hbase.getConf(); + } + + private static class PartQueueEntry { + final String dbName; + final String tableName; + final List partNames; + + PartQueueEntry(String d, String t, List p) { + dbName = d; + tableName = t; + partNames = p; + } + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java new file mode 100644 index 0000000..d38c561 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java @@ -0,0 +1,2106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; + +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; +import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.Operator; +import org.apache.hive.common.util.BloomFilter; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + + +/** + * Class to manage storing object in and reading them from HBase. 
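As a sketch of the calling pattern the rest of the metastore code is expected to follow, not part of the patch: the class name is invented, and it must sit in this package because most HBaseReadWrite methods are package-private.

package org.apache.hadoop.hive.metastore.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Database;

import java.io.IOException;

// Illustrative only.
class HBaseReadWriteUsageSketch {
  static Database fetchDatabase(String dbName) throws IOException {
    Configuration conf = new HiveConf();
    HBaseReadWrite hrw = HBaseReadWrite.getInstance(conf);  // binds a thread-local instance
    hrw.begin();
    try {
      Database db = hrw.getDb(dbName);                      // null if no such database
      hrw.commit();
      return db;
    } catch (IOException e) {
      hrw.rollback();
      throw e;
    }
  }
}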
+ */ +public class HBaseReadWrite { + + @VisibleForTesting final static String AGGR_STATS_TABLE = "HBMS_AGGR_STATS"; + @VisibleForTesting final static String DB_TABLE = "HBMS_DBS"; + @VisibleForTesting final static String FUNC_TABLE = "HBMS_FUNCS"; + @VisibleForTesting final static String GLOBAL_PRIVS_TABLE = "HBMS_GLOBAL_PRIVS"; + @VisibleForTesting final static String PART_TABLE = "HBMS_PARTITIONS"; + @VisibleForTesting final static String ROLE_TABLE = "HBMS_ROLES"; + @VisibleForTesting final static String SD_TABLE = "HBMS_SDS"; + @VisibleForTesting final static String SECURITY_TABLE = "HBMS_SECURITY"; + @VisibleForTesting final static String SEQUENCES_TABLE = "HBMS_SEQUENCES"; + @VisibleForTesting final static String TABLE_TABLE = "HBMS_TBLS"; + @VisibleForTesting final static String USER_TO_ROLE_TABLE = "HBMS_USER_TO_ROLE"; + @VisibleForTesting final static String FILE_METADATA_TABLE = "HBMS_FILE_METADATA"; + @VisibleForTesting final static byte[] CATALOG_CF = "c".getBytes(HBaseUtils.ENCODING); + @VisibleForTesting final static byte[] STATS_CF = "s".getBytes(HBaseUtils.ENCODING); + @VisibleForTesting final static String NO_CACHE_CONF = "no.use.cache"; + /** + * List of tables in HBase + */ + public final static String[] tableNames = { AGGR_STATS_TABLE, DB_TABLE, FUNC_TABLE, GLOBAL_PRIVS_TABLE, + PART_TABLE, USER_TO_ROLE_TABLE, ROLE_TABLE, SD_TABLE, + SECURITY_TABLE, SEQUENCES_TABLE, TABLE_TABLE, + FILE_METADATA_TABLE }; + public final static Map> columnFamilies = + new HashMap> (tableNames.length); + + static { + columnFamilies.put(AGGR_STATS_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(DB_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(FUNC_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(GLOBAL_PRIVS_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(PART_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); + columnFamilies.put(USER_TO_ROLE_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(ROLE_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(SD_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(SECURITY_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(SEQUENCES_TABLE, Arrays.asList(CATALOG_CF)); + columnFamilies.put(TABLE_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); + // Stats CF will contain PPD stats. + columnFamilies.put(FILE_METADATA_TABLE, Arrays.asList(CATALOG_CF, STATS_CF)); + } + + /** + * Stores the bloom filter for the aggregated stats, to determine what partitions are in this + * aggregate. + */ + final static byte[] MASTER_KEY_SEQUENCE = "mk".getBytes(HBaseUtils.ENCODING); + final static byte[] AGGR_STATS_BLOOM_COL = "b".getBytes(HBaseUtils.ENCODING); + private final static byte[] CATALOG_COL = "c".getBytes(HBaseUtils.ENCODING); + private final static byte[] ROLES_COL = "roles".getBytes(HBaseUtils.ENCODING); + private final static byte[] REF_COUNT_COL = "ref".getBytes(HBaseUtils.ENCODING); + private final static byte[] DELEGATION_TOKEN_COL = "dt".getBytes(HBaseUtils.ENCODING); + private final static byte[] MASTER_KEY_COL = "mk".getBytes(HBaseUtils.ENCODING); + private final static byte[] AGGR_STATS_STATS_COL = "s".getBytes(HBaseUtils.ENCODING); + private final static byte[] GLOBAL_PRIVS_KEY = "gp".getBytes(HBaseUtils.ENCODING); + private final static byte[] SEQUENCES_KEY = "seq".getBytes(HBaseUtils.ENCODING); + private final static int TABLES_TO_CACHE = 10; + // False positives are very bad here because they cause us to invalidate entries we shouldn't. 
+ // Space used and # of hash functions grows in proportion to ln of num bits so a 10x increase + // in accuracy doubles the required space and number of hash functions. + private final static double STATS_BF_ERROR_RATE = 0.001; + + @VisibleForTesting final static String TEST_CONN = "test_connection"; + private static HBaseConnection testConn; + + static final private Log LOG = LogFactory.getLog(HBaseReadWrite.class.getName()); + + private static ThreadLocal self = new ThreadLocal() { + @Override + protected HBaseReadWrite initialValue() { + if (staticConf == null) { + throw new RuntimeException("Attempt to create HBaseReadWrite with no configuration set"); + } + return new HBaseReadWrite(staticConf); + } + }; + + private static boolean tablesCreated = false; + private static Configuration staticConf = null; + + private final Configuration conf; + private HBaseConnection conn; + private MessageDigest md; + private ObjectCache, Table> tableCache; + private ObjectCache sdCache; + private PartitionCache partCache; + private StatsCache statsCache; + private Counter tableHits; + private Counter tableMisses; + private Counter tableOverflows; + private Counter partHits; + private Counter partMisses; + private Counter partOverflows; + private Counter sdHits; + private Counter sdMisses; + private Counter sdOverflows; + private List counters; + // roleCache doesn't use ObjectCache because I don't want to limit the size. I am assuming + // that the number of roles will always be small (< 100) so caching the whole thing should not + // be painful. + private final Map roleCache; + boolean entireRoleTableInCache; + + /** + * Get the instance of HBaseReadWrite for the current thread. This is intended to be used by + * {@link org.apache.hadoop.hive.metastore.hbase.HBaseStore} since it creates the thread local + * version of this class. + * @param configuration Configuration object + * @return thread's instance of HBaseReadWrite + */ + public static HBaseReadWrite getInstance(Configuration configuration) { + staticConf = configuration; + return self.get(); + } + + /** + * Get the instance of HBaseReadWrite for the current thread. This is inteded to be used after + * the thread has been initialized. Woe betide you if that's not the case. 
+ * @return thread's instance of HBaseReadWrite + */ + static HBaseReadWrite getInstance() { + return self.get(); + } + + private HBaseReadWrite(Configuration configuration) { + conf = configuration; + HBaseConfiguration.addHbaseResources(conf); + + try { + String connClass = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS); + if (TEST_CONN.equals(connClass)) { + conn = testConn; + LOG.debug("Using test connection."); + } else { + LOG.debug("Instantiating connection class " + connClass); + Class c = Class.forName(connClass); + Object o = c.newInstance(); + if (HBaseConnection.class.isAssignableFrom(o.getClass())) { + conn = (HBaseConnection) o; + } else { + throw new IOException(connClass + " is not an instance of HBaseConnection."); + } + conn.setConf(conf); + conn.connect(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + int totalCatalogObjectsToCache = + HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE); + + tableHits = new Counter("table cache hits"); + tableMisses = new Counter("table cache misses"); + tableOverflows = new Counter("table cache overflows"); + partHits = new Counter("partition cache hits"); + partMisses = new Counter("partition cache misses"); + partOverflows = new Counter("partition cache overflows"); + sdHits = new Counter("storage descriptor cache hits"); + sdMisses = new Counter("storage descriptor cache misses"); + sdOverflows = new Counter("storage descriptor cache overflows"); + counters = new ArrayList<>(); + counters.add(tableHits); + counters.add(tableMisses); + counters.add(tableOverflows); + counters.add(partHits); + counters.add(partMisses); + counters.add(partOverflows); + counters.add(sdHits); + counters.add(sdMisses); + counters.add(sdOverflows); + + // Give 1% of catalog cache space to storage descriptors + // (storage descriptors are shared, so 99% should be the same for a given table) + int sdsCacheSize = totalCatalogObjectsToCache / 100; + if (conf.getBoolean(NO_CACHE_CONF, false)) { + tableCache = new BogusObjectCache<>(); + sdCache = new BogusObjectCache<>(); + partCache = new BogusPartitionCache(); + } else { + tableCache = new ObjectCache<>(TABLES_TO_CACHE, tableHits, tableMisses, tableOverflows); + sdCache = new ObjectCache<>(sdsCacheSize, sdHits, sdMisses, sdOverflows); + partCache = new PartitionCache(totalCatalogObjectsToCache, partHits, partMisses, partOverflows); + } + statsCache = StatsCache.getInstance(conf); + roleCache = new HashMap<>(); + entireRoleTableInCache = false; + } + + // Synchronize this so not everyone's doing it at once. 
+ static synchronized void createTablesIfNotExist() throws IOException { + if (!tablesCreated) { + for (String name : tableNames) { + if (self.get().conn.getHBaseTable(name, true) == null) { + List families = columnFamilies.get(name); + self.get().conn.createHBaseTable(name, families); + } + } + tablesCreated = true; + } + } + + /********************************************************************************************** + * Transaction related methods + *********************************************************************************************/ + + /** + * Begin a transaction + */ + void begin() { + try { + conn.beginTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Commit a transaction + */ + void commit() { + try { + conn.commitTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + void rollback() { + try { + conn.rollbackTransaction(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + void close() throws IOException { + conn.close(); + } + + /********************************************************************************************** + * Database related methods + *********************************************************************************************/ + + /** + * Fetch a database object + * @param name name of the database to fetch + * @return the database object, or null if there is no such database + * @throws IOException + */ + Database getDb(String name) throws IOException { + byte[] key = HBaseUtils.buildKey(name); + byte[] serialized = read(DB_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeDatabase(name, serialized); + } + + /** + * Get a list of databases. + * @param regex Regular expression to use in searching for database names. It is expected to + * be a Java regular expression. If it is null then all databases will be returned. + * @return list of databases matching the regular expression. 
+ * @throws IOException + */ + List scanDatabases(String regex) throws IOException { + Filter filter = null; + if (regex != null) { + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); + } + Iterator iter = + scan(DB_TABLE, CATALOG_CF, CATALOG_COL, filter); + List databases = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + databases.add(HBaseUtils.deserializeDatabase(result.getRow(), + result.getValue(CATALOG_CF, CATALOG_COL))); + } + return databases; + } + + /** + * Store a database object + * @param database database object to store + * @throws IOException + */ + void putDb(Database database) throws IOException { + byte[][] serialized = HBaseUtils.serializeDatabase(database); + store(DB_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + } + + /** + * Drop a database + * @param name name of db to drop + * @throws IOException + */ + void deleteDb(String name) throws IOException { + byte[] key = HBaseUtils.buildKey(name); + delete(DB_TABLE, key, null, null); + } + + /********************************************************************************************** + * Function related methods + *********************************************************************************************/ + + /** + * Fetch a function object + * @param dbName name of the database the function is in + * @param functionName name of the function to fetch + * @return the function object, or null if there is no such function + * @throws IOException + */ + Function getFunction(String dbName, String functionName) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, functionName); + byte[] serialized = read(FUNC_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeFunction(dbName, functionName, serialized); + } + + /** + * Get a list of functions. + * @param dbName Name of the database to search in. + * @param regex Regular expression to use in searching for function names. It is expected to + * be a Java regular expression. If it is null then all functions will be returned. + * @return list of functions matching the regular expression. 
+ * @throws IOException + */ + List<Function> scanFunctions(String dbName, String regex) throws IOException { + byte[] keyPrefix = null; + if (dbName != null) { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName); + } + Filter filter = null; + if (regex != null) { + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); + } + Iterator<Result> iter = + scan(FUNC_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), CATALOG_CF, CATALOG_COL, filter); + List<Function> functions = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + functions.add(HBaseUtils.deserializeFunction(result.getRow(), + result.getValue(CATALOG_CF, CATALOG_COL))); + } + return functions; + } + + /** + * Store a function object + * @param function function object to store + * @throws IOException + */ + void putFunction(Function function) throws IOException { + byte[][] serialized = HBaseUtils.serializeFunction(function); + store(FUNC_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + } + + /** + * Drop a function + * @param dbName name of database the function is in + * @param functionName name of function to drop + * @throws IOException + */ + void deleteFunction(String dbName, String functionName) throws IOException { + byte[] key = HBaseUtils.buildKey(dbName, functionName); + delete(FUNC_TABLE, key, null, null); + } + + /********************************************************************************************** + * Global privilege related methods + *********************************************************************************************/ + + /** + * Fetch the global privileges object + * @return the global privilege set, or null if none has been stored + * @throws IOException + */ + PrincipalPrivilegeSet getGlobalPrivs() throws IOException { + byte[] key = GLOBAL_PRIVS_KEY; + byte[] serialized = read(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializePrincipalPrivilegeSet(serialized); + } + + /** + * Store the global privileges object + * @throws IOException + */ + void putGlobalPrivs(PrincipalPrivilegeSet privs) throws IOException { + byte[] key = GLOBAL_PRIVS_KEY; + byte[] serialized = HBaseUtils.serializePrincipalPrivilegeSet(privs); + store(GLOBAL_PRIVS_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); + } + + /********************************************************************************************** + * Partition related methods + *********************************************************************************************/ + + /** + * Fetch one partition + * @param dbName database the table is in + * @param tableName table the partition is in + * @param partVals list of values that specify the partition, given in the same order as the + * columns they belong to + * @return The partition object, or null if there is no such partition + * @throws IOException + */ + Partition getPartition(String dbName, String tableName, List<String> partVals) + throws IOException { + return getPartition(dbName, tableName, partVals, true); + } + + /** + * Get a set of specific partitions. This cannot be used to do a scan; each partition must be + * completely specified. This does not use the partition cache. + * @param dbName database the table is in + * @param tableName table the partitions are in + * @param partValLists list of list of values, each list should uniquely identify one partition + * @return a list of partition objects. 
+ * @throws IOException + */ + List getPartitions(String dbName, String tableName, List partTypes, + List> partValLists) throws IOException { + List parts = new ArrayList<>(partValLists.size()); + List gets = new ArrayList<>(partValLists.size()); + for (List partVals : partValLists) { + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + Get get = new Get(key); + get.addColumn(CATALOG_CF, CATALOG_COL); + gets.add(get); + } + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + Result[] results = htab.get(gets); + for (int i = 0; i < results.length; i++) { + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializePartition(dbName, tableName, partValLists.get(i), + results[i].getValue(CATALOG_CF, CATALOG_COL)); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + parts.add(sdParts.containingPartition); + } + + return parts; + } + + /** + * Add a partition. This should only be called for new partitions. For altering existing + * partitions this should not be called as it will blindly increment the ref counter for the + * storage descriptor. + * @param partition partition object to add + * @throws IOException + */ + void putPartition(Partition partition) throws IOException { + byte[] hash = putStorageDescriptor(partition.getSd()); + byte[][] serialized = HBaseUtils.serializePartition(partition, + HBaseUtils.getPartitionKeyTypes(getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()), hash); + store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + partCache.put(partition.getDbName(), partition.getTableName(), partition); + } + + /** + * Replace an existing partition. + * @param oldPart partition to be replaced + * @param newPart partitiion to replace it with + * @throws IOException + */ + void replacePartition(Partition oldPart, Partition newPart, List partTypes) throws IOException { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldPart.getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newPart.getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldPart.getSd()); + hash = putStorageDescriptor(newPart.getSd()); + } + byte[][] serialized = HBaseUtils.serializePartition(newPart, + HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + partCache.put(newPart.getDbName(), newPart.getTableName(), newPart); + if (!oldPart.getTableName().equals(newPart.getTableName())) { + deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues()); + } + } + + /** + * Add a group of partitions. This should only be used when all partitions are new. It + * blindly increments the ref count on the storage descriptor. 
+ * @param partitions list of partitions to add + * @throws IOException + */ + void putPartitions(List partitions) throws IOException { + List puts = new ArrayList<>(partitions.size()); + for (Partition partition : partitions) { + byte[] hash = putStorageDescriptor(partition.getSd()); + List partTypes = HBaseUtils.getPartitionKeyTypes( + getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys()); + byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash); + Put p = new Put(serialized[0]); + p.add(CATALOG_CF, CATALOG_COL, serialized[1]); + puts.add(p); + partCache.put(partition.getDbName(), partition.getTableName(), partition); + } + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + htab.put(puts); + conn.flush(htab); + } + + void replacePartitions(List oldParts, List newParts, List oldPartTypes) throws IOException { + if (oldParts.size() != newParts.size()) { + throw new RuntimeException("Number of old and new partitions must match."); + } + List puts = new ArrayList<>(newParts.size()); + for (int i = 0; i < newParts.size(); i++) { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldParts.get(i).getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newParts.get(i).getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldParts.get(i).getSd()); + hash = putStorageDescriptor(newParts.get(i).getSd()); + } + Partition newPart = newParts.get(i); + byte[][] serialized = HBaseUtils.serializePartition(newPart, + HBaseUtils.getPartitionKeyTypes(getTable(newPart.getDbName(), newPart.getTableName()).getPartitionKeys()), hash); + Put p = new Put(serialized[0]); + p.add(CATALOG_CF, CATALOG_COL, serialized[1]); + puts.add(p); + partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i)); + if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) { + // We need to remove the old record as well. + deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes, + oldParts.get(i).getValues(), false); + } + } + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + htab.put(puts); + conn.flush(htab); + } + + /** + * Find all the partitions in a table. + * @param dbName name of the database the table is in + * @param tableName table name + * @param maxPartitions max partitions to fetch. If negative all partitions will be returned. + * @return List of partitions that match the criteria. + * @throws IOException + */ + List scanPartitionsInTable(String dbName, String tableName, int maxPartitions) + throws IOException { + if (maxPartitions < 0) maxPartitions = Integer.MAX_VALUE; + Collection cached = partCache.getAllForTable(dbName, tableName); + if (cached != null) { + return maxPartitions < cached.size() + ? new ArrayList<>(cached).subList(0, maxPartitions) + : new ArrayList<>(cached); + } + byte[] keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, new ArrayList(), + new ArrayList(), false); + List parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), -1, null); + partCache.put(dbName, tableName, parts, true); + return maxPartitions < parts.size() ? parts.subList(0, maxPartitions) : parts; + } + + /** + * Scan partitions based on partial key information. + * @param dbName name of database, required + * @param tableName name of table, required + * @param partVals partial specification of values. 
Any values that are unknown can instead be + * a '*'. For example, if a table had two partition columns date + * and region (in that order), and partitions ('today', 'na'), ('today', 'eu'), + * ('tomorrow', 'na'), ('tomorrow', 'eu') then passing ['today', '*'] would return + * ('today', 'na') and ('today', 'eu') while passing ['*', 'eu'] would return + * ('today', 'eu') and ('tomorrow', 'eu'). Also the list can terminate early, + * which will be the equivalent of adding '*' for all non-included values. + * I.e. ['today'] is the same as ['today', '*']. + * @param maxPartitions Maximum number of entries to return. + * @return list of partitions that match the specified information + * @throws IOException + * @throws org.apache.hadoop.hive.metastore.api.NoSuchObjectException if the table containing + * the partitions can't be found. + */ + List scanPartitions(String dbName, String tableName, List partVals, + int maxPartitions) throws IOException, NoSuchObjectException { + // First, build as much of the key as we can so that we make the scan as tight as possible. + List keyElements = new ArrayList<>(); + keyElements.add(dbName); + keyElements.add(tableName); + + int firstStar = -1; + for (int i = 0; i < partVals.size(); i++) { + if ("*".equals(partVals.get(i))) { + firstStar = i; + break; + } else { + // empty string equals to null partition, + // means star + if (partVals.get(i).equals("")) { + break; + } else { + keyElements.add(partVals.get(i)); + } + } + } + + byte[] keyPrefix; + // We need to fetch the table to determine if the user fully specified the partitions or + // not, as it affects how we build the key. + Table table = getTable(dbName, tableName); + if (table == null) { + throw new NoSuchObjectException("Unable to find table " + dbName + "." 
+ tableName); + } + keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName, + HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size()-2)), + keyElements.subList(2, keyElements.size())); + + // Now, build a filter out of the remaining keys + List ranges = new ArrayList(); + List ops = new ArrayList(); + if (!(partVals.size() == table.getPartitionKeys().size() && firstStar == -1)) { + + for (int i = Math.max(0, firstStar); + i < table.getPartitionKeys().size() && i < partVals.size(); i++) { + + if ("*".equals(partVals.get(i))) { + PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator( + PartitionKeyComparator.Operator.Type.LIKE, + table.getPartitionKeys().get(i).getName(), + ".*"); + ops.add(op); + } else { + PartitionKeyComparator.Range range = new PartitionKeyComparator.Range( + table.getPartitionKeys().get(i).getName(), + new PartitionKeyComparator.Mark(partVals.get(i), true), + new PartitionKeyComparator.Mark(partVals.get(i), true)); + ranges.add(range); + } + } + } + + Filter filter = null; + if (!ranges.isEmpty() || !ops.isEmpty()) { + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new PartitionKeyComparator( + StringUtils.join(HBaseUtils.getPartitionNames(table.getPartitionKeys()), ","), + StringUtils.join(HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys()), ","), + ranges, ops)); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Scanning partitions with prefix <" + new String(keyPrefix) + "> and filter <" + + filter + ">"); + } + + List parts = scanPartitionsWithFilter(dbName, tableName, keyPrefix, + HBaseUtils.getEndPrefix(keyPrefix), maxPartitions, filter); + partCache.put(dbName, tableName, parts, false); + return parts; + } + + List scanPartitions(String dbName, String tableName, byte[] keyStart, byte[] keyEnd, + Filter filter, int maxPartitions) throws IOException, NoSuchObjectException { + byte[] startRow = keyStart; + byte[] endRow; + if (keyEnd == null || keyEnd.length == 0) { + // stop when current db+table entries are over + endRow = HBaseUtils.getEndPrefix(startRow); + } else { + endRow = keyEnd; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Scanning partitions with start row <" + new String(startRow) + "> and end row <" + + new String(endRow) + ">"); + } + return scanPartitionsWithFilter(dbName, tableName, startRow, endRow, maxPartitions, filter); + } + + + + /** + * Delete a partition + * @param dbName database name that table is in + * @param tableName table partition is in + * @param partVals partition values that define this partition, in the same order as the + * partition columns they are values for + * @throws IOException + */ + void deletePartition(String dbName, String tableName, List partTypes, + List partVals) throws IOException { + deletePartition(dbName, tableName, partTypes, partVals, true); + } + + private void deletePartition(String dbName, String tableName, List partTypes, + List partVals, boolean decrementRefCnt) throws IOException { + // Find the partition so I can get the storage descriptor and drop it + partCache.remove(dbName, tableName, partVals); + if (decrementRefCnt) { + Partition p = getPartition(dbName, tableName, partVals, false); + decrementStorageDescriptorRefCount(p.getSd()); + } + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals); + delete(PART_TABLE, key, null, null); + } + + private Partition getPartition(String dbName, String tableName, List partVals, + boolean populateCache) throws IOException { + Partition cached = 
partCache.get(dbName, tableName, partVals); + if (cached != null) return cached; + byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals); + byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializePartition(dbName, tableName, partVals, serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + if (populateCache) partCache.put(dbName, tableName, sdParts.containingPartition); + return sdParts.containingPartition; + } + + private List scanPartitionsWithFilter(String dbName, String tableName, + byte[] startRow, byte [] endRow, int maxResults, Filter filter) + throws IOException { + Iterator iter = + scan(PART_TABLE, startRow, endRow, CATALOG_CF, CATALOG_COL, filter); + List tablePartitions = getTable(dbName, tableName).getPartitionKeys(); + List parts = new ArrayList<>(); + int numToFetch = maxResults < 0 ? Integer.MAX_VALUE : maxResults; + for (int i = 0; i < numToFetch && iter.hasNext(); i++) { + Result result = iter.next(); + HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(dbName, tableName, + tablePartitions, result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL), staticConf); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + parts.add(sdParts.containingPartition); + } + return parts; + } + + /********************************************************************************************** + * Role related methods + *********************************************************************************************/ + + /** + * Fetch the list of all roles for a user + * @param userName name of the user + * @return the list of all roles this user participates in + * @throws IOException + */ + List getUserRoles(String userName) throws IOException { + byte[] key = HBaseUtils.buildKey(userName); + byte[] serialized = read(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeRoleList(serialized); + } + + /** + * Find all roles directly participated in by a given principal. This builds the role cache + * because it assumes that subsequent calls may be made to find roles participated in indirectly. + * @param name username or role name + * @param type user or role + * @return map of role name to grant info for all roles directly participated in. 
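+   * @throws IOException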
+ */ + List getPrincipalDirectRoles(String name, PrincipalType type) + throws IOException { + buildRoleCache(); + + Set rolesFound = new HashSet<>(); + for (Map.Entry e : roleCache.entrySet()) { + for (HbaseMetastoreProto.RoleGrantInfo giw : e.getValue().getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()) == type && + giw.getPrincipalName().equals(name)) { + rolesFound.add(e.getKey()); + break; + } + } + } + List directRoles = new ArrayList<>(rolesFound.size()); + List gets = new ArrayList<>(); + HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); + for (String roleFound : rolesFound) { + byte[] key = HBaseUtils.buildKey(roleFound); + Get g = new Get(key); + g.addColumn(CATALOG_CF, CATALOG_COL); + gets.add(g); + } + + Result[] results = htab.get(gets); + for (int i = 0; i < results.length; i++) { + byte[] serialized = results[i].getValue(CATALOG_CF, CATALOG_COL); + if (serialized != null) { + directRoles.add(HBaseUtils.deserializeRole(results[i].getRow(), serialized)); + } + } + + return directRoles; + } + + /** + * Fetch all roles and users included directly in a given role. + * @param roleName name of the principal + * @return a list of all roles included in this role + * @throws IOException + */ + HbaseMetastoreProto.RoleGrantInfoList getRolePrincipals(String roleName) + throws IOException, NoSuchObjectException { + HbaseMetastoreProto.RoleGrantInfoList rolePrincipals = roleCache.get(roleName); + if (rolePrincipals != null) return rolePrincipals; + byte[] key = HBaseUtils.buildKey(roleName); + byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, ROLES_COL); + if (serialized == null) return null; + rolePrincipals = HbaseMetastoreProto.RoleGrantInfoList.parseFrom(serialized); + roleCache.put(roleName, rolePrincipals); + return rolePrincipals; + } + + /** + * Given a role, find all users who are either directly or indirectly participate in this role. + * This is expensive, it should be used sparingly. It scan the entire userToRole table and + * does a linear search on each entry. + * @param roleName name of the role + * @return set of all users in the role + * @throws IOException + */ + Set findAllUsersInRole(String roleName) throws IOException { + // Walk the userToRole table and collect every user that matches this role. + Set users = new HashSet<>(); + Iterator iter = scan(USER_TO_ROLE_TABLE, CATALOG_CF, CATALOG_COL); + while (iter.hasNext()) { + Result result = iter.next(); + List roleList = + HBaseUtils.deserializeRoleList(result.getValue(CATALOG_CF, CATALOG_COL)); + for (String rn : roleList) { + if (rn.equals(roleName)) { + users.add(new String(result.getRow(), HBaseUtils.ENCODING)); + break; + } + } + } + return users; + } + + /** + * Add a principal to a role. + * @param roleName name of the role to add principal to + * @param grantInfo grant information for this principal. 
+ * @throws java.io.IOException + * @throws NoSuchObjectException + * + */ + void addPrincipalToRole(String roleName, HbaseMetastoreProto.RoleGrantInfo grantInfo) + throws IOException, NoSuchObjectException { + HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); + List rolePrincipals = new ArrayList<>(); + if (proto != null) { + rolePrincipals.addAll(proto.getGrantInfoList()); + } + + rolePrincipals.add(grantInfo); + proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rolePrincipals) + .build(); + byte[] key = HBaseUtils.buildKey(roleName); + store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); + roleCache.put(roleName, proto); + } + + /** + * Drop a principal from a role. + * @param roleName Name of the role to drop the principal from + * @param principalName name of the principal to drop from the role + * @param type user or role + * @param grantOnly if this is true, just remove the grant option, don't actually remove the + * user from the role. + * @throws NoSuchObjectException + * @throws IOException + */ + void dropPrincipalFromRole(String roleName, String principalName, PrincipalType type, + boolean grantOnly) + throws NoSuchObjectException, IOException { + HbaseMetastoreProto.RoleGrantInfoList proto = getRolePrincipals(roleName); + if (proto == null) return; + List rolePrincipals = new ArrayList<>(); + rolePrincipals.addAll(proto.getGrantInfoList()); + + for (int i = 0; i < rolePrincipals.size(); i++) { + if (HBaseUtils.convertPrincipalTypes(rolePrincipals.get(i).getPrincipalType()) == type && + rolePrincipals.get(i).getPrincipalName().equals(principalName)) { + if (grantOnly) { + rolePrincipals.set(i, + HbaseMetastoreProto.RoleGrantInfo.newBuilder(rolePrincipals.get(i)) + .setGrantOption(false) + .build()); + } else { + rolePrincipals.remove(i); + } + break; + } + } + byte[] key = HBaseUtils.buildKey(roleName); + proto = HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rolePrincipals) + .build(); + store(ROLE_TABLE, key, CATALOG_CF, ROLES_COL, proto.toByteArray()); + roleCache.put(roleName, proto); + } + + /** + * Rebuild the row for a given user in the USER_TO_ROLE table. This is expensive. It + * should be called as infrequently as possible. + * @param userName name of the user + * @throws IOException + */ + void buildRoleMapForUser(String userName) throws IOException, NoSuchObjectException { + // This is mega ugly. Hopefully we don't have to do this too often. + // First, scan the role table and put it all in memory + buildRoleCache(); + LOG.debug("Building role map for " + userName); + + // Second, find every role the user participates in directly. + Set rolesToAdd = new HashSet<>(); + Set rolesToCheckNext = new HashSet<>(); + for (Map.Entry e : roleCache.entrySet()) { + for (HbaseMetastoreProto.RoleGrantInfo grantInfo : e.getValue().getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.USER && + userName .equals(grantInfo.getPrincipalName())) { + rolesToAdd.add(e.getKey()); + rolesToCheckNext.add(e.getKey()); + LOG.debug("Adding " + e.getKey() + " to list of roles user is in directly"); + break; + } + } + } + + // Third, find every role the user participates in indirectly (that is, they have been + // granted into role X and role Y has been granted into role X). 
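+    // Breadth-first expansion of the role graph: each pass only examines roles discovered in
+    // the previous pass, and rolesToAdd.add() filters out roles already seen, so the loop
+    // terminates once no new roles are found.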
+ while (rolesToCheckNext.size() > 0) { + Set tmpRolesToCheckNext = new HashSet<>(); + for (String roleName : rolesToCheckNext) { + HbaseMetastoreProto.RoleGrantInfoList grantInfos = roleCache.get(roleName); + if (grantInfos == null) continue; // happens when a role contains no grants + for (HbaseMetastoreProto.RoleGrantInfo grantInfo : grantInfos.getGrantInfoList()) { + if (HBaseUtils.convertPrincipalTypes(grantInfo.getPrincipalType()) == PrincipalType.ROLE && + rolesToAdd.add(grantInfo.getPrincipalName())) { + tmpRolesToCheckNext.add(grantInfo.getPrincipalName()); + LOG.debug("Adding " + grantInfo.getPrincipalName() + + " to list of roles user is in indirectly"); + } + } + } + rolesToCheckNext = tmpRolesToCheckNext; + } + + byte[] key = HBaseUtils.buildKey(userName); + byte[] serialized = HBaseUtils.serializeRoleList(new ArrayList<>(rolesToAdd)); + store(USER_TO_ROLE_TABLE, key, CATALOG_CF, CATALOG_COL, serialized); + } + + /** + * Remove all of the grants for a role. This is not cheap. + * @param roleName Role to remove from all other roles and grants + * @throws IOException + */ + void removeRoleGrants(String roleName) throws IOException { + buildRoleCache(); + + List puts = new ArrayList<>(); + // First, walk the role table and remove any references to this role + for (Map.Entry e : roleCache.entrySet()) { + boolean madeAChange = false; + List rgil = new ArrayList<>(); + rgil.addAll(e.getValue().getGrantInfoList()); + for (int i = 0; i < rgil.size(); i++) { + if (HBaseUtils.convertPrincipalTypes(rgil.get(i).getPrincipalType()) == PrincipalType.ROLE && + rgil.get(i).getPrincipalName().equals(roleName)) { + rgil.remove(i); + madeAChange = true; + break; + } + } + if (madeAChange) { + Put put = new Put(HBaseUtils.buildKey(e.getKey())); + HbaseMetastoreProto.RoleGrantInfoList proto = + HbaseMetastoreProto.RoleGrantInfoList.newBuilder() + .addAllGrantInfo(rgil) + .build(); + put.add(CATALOG_CF, ROLES_COL, proto.toByteArray()); + puts.add(put); + roleCache.put(e.getKey(), proto); + } + } + + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(ROLE_TABLE); + htab.put(puts); + conn.flush(htab); + } + + // Remove any global privileges held by this role + PrincipalPrivilegeSet global = getGlobalPrivs(); + if (global != null && + global.getRolePrivileges() != null && + global.getRolePrivileges().remove(roleName) != null) { + putGlobalPrivs(global); + } + + // Now, walk the db table + puts.clear(); + List dbs = scanDatabases(null); + if (dbs == null) dbs = new ArrayList<>(); // rare, but can happen + for (Database db : dbs) { + if (db.getPrivileges() != null && + db.getPrivileges().getRolePrivileges() != null && + db.getPrivileges().getRolePrivileges().remove(roleName) != null) { + byte[][] serialized = HBaseUtils.serializeDatabase(db); + Put put = new Put(serialized[0]); + put.add(CATALOG_CF, CATALOG_COL, serialized[1]); + puts.add(put); + } + } + + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(DB_TABLE); + htab.put(puts); + conn.flush(htab); + } + + // Finally, walk the table table + puts.clear(); + for (Database db : dbs) { + List
tables = scanTables(db.getName(), null); + if (tables != null) { + for (Table table : tables) { + if (table.getPrivileges() != null && + table.getPrivileges().getRolePrivileges() != null && + table.getPrivileges().getRolePrivileges().remove(roleName) != null) { + byte[][] serialized = HBaseUtils.serializeTable(table, + HBaseUtils.hashStorageDescriptor(table.getSd(), md)); + Put put = new Put(serialized[0]); + put.add(CATALOG_CF, CATALOG_COL, serialized[1]); + puts.add(put); + } + } + } + } + + if (puts.size() > 0) { + HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); + htab.put(puts); + conn.flush(htab); + } + } + + /** + * Fetch a role + * @param roleName name of the role + * @return role object, or null if no such role + * @throws IOException + */ + Role getRole(String roleName) throws IOException { + byte[] key = HBaseUtils.buildKey(roleName); + byte[] serialized = read(ROLE_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeRole(roleName, serialized); + } + + /** + * Get a list of roles. + * @return list of all known roles. + * @throws IOException + */ + List scanRoles() throws IOException { + Iterator iter = scan(ROLE_TABLE, CATALOG_CF, CATALOG_COL); + List roles = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + roles.add(HBaseUtils.deserializeRole(result.getRow(), + result.getValue(CATALOG_CF, CATALOG_COL))); + } + return roles; + } + + /** + * Add a new role + * @param role role object + * @throws IOException + */ + void putRole(Role role) throws IOException { + byte[][] serialized = HBaseUtils.serializeRole(role); + store(ROLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + } + + /** + * Drop a role + * @param roleName name of role to drop + * @throws IOException + */ + void deleteRole(String roleName) throws IOException { + byte[] key = HBaseUtils.buildKey(roleName); + delete(ROLE_TABLE, key, null, null); + roleCache.remove(roleName); + } + + private void buildRoleCache() throws IOException { + if (!entireRoleTableInCache) { + Iterator roles = scan(ROLE_TABLE, CATALOG_CF, ROLES_COL); + while (roles.hasNext()) { + Result res = roles.next(); + String roleName = new String(res.getRow(), HBaseUtils.ENCODING); + HbaseMetastoreProto.RoleGrantInfoList grantInfos = + HbaseMetastoreProto.RoleGrantInfoList.parseFrom(res.getValue(CATALOG_CF, ROLES_COL)); + roleCache.put(roleName, grantInfos); + } + entireRoleTableInCache = true; + } + } + + /********************************************************************************************** + * Table related methods + *********************************************************************************************/ + + /** + * Fetch a table object + * @param dbName database the table is in + * @param tableName table name + * @return Table object, or null if no such table + * @throws IOException + */ + Table getTable(String dbName, String tableName) throws IOException { + return getTable(dbName, tableName, true); + } + + /** + * Fetch a list of table objects. + * @param dbName Database that all fetched tables are in + * @param tableNames list of table names + * @return list of tables, in the same order as the provided names. + * @throws IOException + */ + List
getTables(String dbName, List tableNames) throws IOException { + // I could implement getTable in terms of this method. But it is such a core function + // that I don't want to slow it down for the much less common fetching of multiple tables. + List
results = new ArrayList<>(tableNames.size()); + ObjectPair[] hashKeys = new ObjectPair[tableNames.size()]; + boolean atLeastOneMissing = false; + for (int i = 0; i < tableNames.size(); i++) { + hashKeys[i] = new ObjectPair<>(dbName, tableNames.get(i)); + // The result may be null, but we still want to add it so that we have a slot in the list + // for it. + results.add(tableCache.get(hashKeys[i])); + if (results.get(i) == null) atLeastOneMissing = true; + } + if (!atLeastOneMissing) return results; + + // Now build a single get that will fetch the remaining tables + List gets = new ArrayList<>(); + HTableInterface htab = conn.getHBaseTable(TABLE_TABLE); + for (int i = 0; i < tableNames.size(); i++) { + if (results.get(i) != null) continue; + byte[] key = HBaseUtils.buildKey(dbName, tableNames.get(i)); + Get g = new Get(key); + g.addColumn(CATALOG_CF, CATALOG_COL); + gets.add(g); + } + Result[] res = htab.get(gets); + for (int i = 0, nextGet = 0; i < tableNames.size(); i++) { + if (results.get(i) != null) continue; + byte[] serialized = res[nextGet++].getValue(CATALOG_CF, CATALOG_COL); + if (serialized != null) { + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializeTable(dbName, tableNames.get(i), serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + tableCache.put(hashKeys[i], sdParts.containingTable); + results.set(i, sdParts.containingTable); + } + } + return results; + } + + /** + * Get a list of tables. + * @param dbName Database these tables are in + * @param regex Regular expression to use in searching for table names. It is expected to + * be a Java regular expression. If it is null then all tables in the indicated + * database will be returned. + * @return list of tables matching the regular expression. + * @throws IOException + */ + List
scanTables(String dbName, String regex) throws IOException { + // There's no way to know whether all the tables we are looking for are + // in the cache, so we would need to scan one way or another. Thus there's no value in hitting + // the cache for this function. + byte[] keyPrefix = null; + if (dbName != null) { + keyPrefix = HBaseUtils.buildKeyWithTrailingSeparator(dbName); + } + Filter filter = null; + if (regex != null) { + filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex)); + } + Iterator iter = + scan(TABLE_TABLE, keyPrefix, HBaseUtils.getEndPrefix(keyPrefix), + CATALOG_CF, CATALOG_COL, filter); + List
tables = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializeTable(result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL)); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + tables.add(sdParts.containingTable); + } + return tables; + } + + /** + * Put a table object. This should only be called when the table is new (create table) as it + * will blindly add/increment the storage descriptor. If you are altering an existing table + * call {@link #replaceTable} instead. + * @param table table object + * @throws IOException + */ + void putTable(Table table) throws IOException { + byte[] hash = putStorageDescriptor(table.getSd()); + byte[][] serialized = HBaseUtils.serializeTable(table, hash); + store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + tableCache.put(new ObjectPair<>(table.getDbName(), table.getTableName()), table); + } + + /** + * Replace an existing table. This will also compare the storage descriptors and see if the + * reference count needs to be adjusted + * @param oldTable old version of the table + * @param newTable new version of the table + */ + void replaceTable(Table oldTable, Table newTable) throws IOException { + byte[] hash; + byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldTable.getSd(), md); + byte[] newHash = HBaseUtils.hashStorageDescriptor(newTable.getSd(), md); + if (Arrays.equals(oldHash, newHash)) { + hash = oldHash; + } else { + decrementStorageDescriptorRefCount(oldTable.getSd()); + hash = putStorageDescriptor(newTable.getSd()); + } + byte[][] serialized = HBaseUtils.serializeTable(newTable, hash); + store(TABLE_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]); + tableCache.put(new ObjectPair<>(newTable.getDbName(), newTable.getTableName()), newTable); + if (!oldTable.getTableName().equals(newTable.getTableName())) { + deleteTable(oldTable.getDbName(), oldTable.getTableName()); + } + } + + /** + * Delete a table + * @param dbName name of database table is in + * @param tableName table to drop + * @throws IOException + */ + void deleteTable(String dbName, String tableName) throws IOException { + deleteTable(dbName, tableName, true); + } + + private void deleteTable(String dbName, String tableName, boolean decrementRefCnt) + throws IOException { + tableCache.remove(new ObjectPair<>(dbName, tableName)); + if (decrementRefCnt) { + // Find the table so I can get the storage descriptor and drop it + Table t = getTable(dbName, tableName, false); + decrementStorageDescriptorRefCount(t.getSd()); + } + byte[] key = HBaseUtils.buildKey(dbName, tableName); + delete(TABLE_TABLE, key, null, null); + } + + private Table getTable(String dbName, String tableName, boolean populateCache) + throws IOException { + ObjectPair hashKey = new ObjectPair<>(dbName, tableName); + Table cached = tableCache.get(hashKey); + if (cached != null) return cached; + byte[] key = HBaseUtils.buildKey(dbName, tableName); + byte[] serialized = read(TABLE_TABLE, key, CATALOG_CF, CATALOG_COL); + if (serialized == null) return null; + HBaseUtils.StorageDescriptorParts sdParts = + HBaseUtils.deserializeTable(dbName, tableName, serialized); + StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash); + HBaseUtils.assembleStorageDescriptor(sd, sdParts); + if (populateCache) tableCache.put(hashKey, sdParts.containingTable); + return sdParts.containingTable; + } + + 
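The storage descriptor methods in the next section deduplicate SDs by content hash and reference count them, so many partitions of one table typically share a single SD_TABLE row. The snippet below is a minimal, in-memory sketch of that bookkeeping, included only for illustration; it is not part of the patch, and the class, field, and descriptor-string names (SdRefCountModel, Entry, the "orc|..." strings) are invented. The real logic is in putStorageDescriptor and decrementStorageDescriptorRefCount below, which persist the count in REF_COUNT_COL of SD_TABLE.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * Simplified, in-memory model of the SD_TABLE scheme: a storage descriptor is stored once
 * under its MD5 hash and shared through a reference count. A plain String stands in for the
 * serialized thrift StorageDescriptor.
 */
public class SdRefCountModel {
  private static class Entry {
    byte[] serialized;
    int refCount;
  }

  private final Map<String, Entry> table = new HashMap<>();  // stands in for SD_TABLE
  private final MessageDigest md;

  public SdRefCountModel() throws Exception {
    md = MessageDigest.getInstance("MD5");
  }

  /** Analogue of putStorageDescriptor: store once, otherwise just bump the count. */
  public byte[] put(String descriptor) {
    byte[] serialized = descriptor.getBytes(StandardCharsets.UTF_8);
    byte[] hash = md.digest(serialized);
    String key = Arrays.toString(hash);
    Entry e = table.get(key);
    if (e == null) {
      e = new Entry();
      e.serialized = serialized;
      e.refCount = 1;
      table.put(key, e);
    } else {
      e.refCount++;
    }
    return hash;  // a table or partition row would record only this hash
  }

  /** Analogue of decrementStorageDescriptorRefCount: drop the row when the count hits zero. */
  public void release(String descriptor) {
    String key = Arrays.toString(md.digest(descriptor.getBytes(StandardCharsets.UTF_8)));
    Entry e = table.get(key);
    if (e == null) return;                 // already gone, nothing to do
    if (--e.refCount < 1) table.remove(key);
  }

  public static void main(String[] args) throws Exception {
    SdRefCountModel sds = new SdRefCountModel();
    byte[] h1 = sds.put("orc|cols:a,b");   // first partition: count = 1
    byte[] h2 = sds.put("orc|cols:a,b");   // identical SD: same row, count = 2
    System.out.println("shared row: " + Arrays.equals(h1, h2));  // true
    sds.release("orc|cols:a,b");           // count back to 1
    sds.release("orc|cols:a,b");           // count hits 0, row removed
  }
}

Keeping only a hash in each table and partition row keeps those rows small and lets partitions with identical layouts share one SD; the trade-off is the explicit ref-count maintenance visible in replaceTable and replacePartitions above.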
/********************************************************************************************** + * StorageDescriptor related methods + *********************************************************************************************/ + + /** + * If this serde has already been read, then return it from the cache. If not, read it, then + * return it. + * @param hash hash of the storage descriptor to read + * @return the storage descriptor + * @throws IOException + */ + StorageDescriptor getStorageDescriptor(byte[] hash) throws IOException { + ByteArrayWrapper hashKey = new ByteArrayWrapper(hash); + StorageDescriptor cached = sdCache.get(hashKey); + if (cached != null) return cached; + LOG.debug("Not found in cache, looking in hbase"); + byte[] serialized = read(SD_TABLE, hash, CATALOG_CF, CATALOG_COL); + if (serialized == null) { + throw new RuntimeException("Woh, bad! Trying to fetch a non-existent storage descriptor " + + "from hash " + Base64.encodeBase64String(hash)); + } + StorageDescriptor sd = HBaseUtils.deserializeStorageDescriptor(serialized); + sdCache.put(hashKey, sd); + return sd; + } + + /** + * Lower the reference count on the storage descriptor by one. If it goes to zero, then it + * will be deleted. + * @param sd Storage descriptor + * @throws IOException + */ + void decrementStorageDescriptorRefCount(StorageDescriptor sd) throws IOException { + byte[] key = HBaseUtils.hashStorageDescriptor(sd, md); + byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); + if (serializedRefCnt == null) { + // Someone deleted it before we got to it, no worries + return; + } + int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)); + HTableInterface htab = conn.getHBaseTable(SD_TABLE); + if (--refCnt < 1) { + Delete d = new Delete(key); + // We don't use checkAndDelete here because it isn't compatible with the transaction + // managers. If the transaction managers are doing their jobs then we should not need it + // anyway. + htab.delete(d); + sdCache.remove(new ByteArrayWrapper(key)); + } else { + Put p = new Put(key); + p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); + htab.put(p); + conn.flush(htab); + } + } + + /** + * Place the common parts of a storage descriptor into the cache and write the storage + * descriptor out to HBase. This should only be called if you are sure that the storage + * descriptor needs to be added. If you have changed a table or partition but not it's storage + * descriptor do not call this method, as it will increment the reference count of the storage + * descriptor. + * @param storageDescriptor storage descriptor to store. 
+ * @return id of the entry in the cache, to be written in for the storage descriptor + */ + byte[] putStorageDescriptor(StorageDescriptor storageDescriptor) throws IOException { + byte[] sd = HBaseUtils.serializeStorageDescriptor(storageDescriptor); + byte[] key = HBaseUtils.hashStorageDescriptor(storageDescriptor, md); + byte[] serializedRefCnt = read(SD_TABLE, key, CATALOG_CF, REF_COUNT_COL); + HTableInterface htab = conn.getHBaseTable(SD_TABLE); + if (serializedRefCnt == null) { + // We are the first to put it in the DB + Put p = new Put(key); + p.add(CATALOG_CF, CATALOG_COL, sd); + p.add(CATALOG_CF, REF_COUNT_COL, "1".getBytes(HBaseUtils.ENCODING)); + htab.put(p); + sdCache.put(new ByteArrayWrapper(key), storageDescriptor); + } else { + // Just increment the reference count + int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1; + Put p = new Put(key); + p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING)); + htab.put(p); + } + conn.flush(htab); + return key; + } + + private static class ByteArrayWrapper { + byte[] wrapped; + + ByteArrayWrapper(byte[] b) { + wrapped = b; + } + + @Override + public boolean equals(Object other) { + if (other instanceof ByteArrayWrapper) { + return Arrays.equals(((ByteArrayWrapper)other).wrapped, wrapped); + } else { + return false; + } + } + + @Override + public int hashCode() { + return Arrays.hashCode(wrapped); + } + } + + /********************************************************************************************** + * Statistics related methods + *********************************************************************************************/ + + /** + * Update statistics for one or more columns for a table or a partition. + * + * @param dbName database the table is in + * @param tableName table to update statistics for + * @param partVals partition values that define partition to update statistics for. 
If this is + * null, then these will be assumed to be table level statistics + * @param stats Stats object with stats for one or more columns + * @throws IOException + */ + void updateStatistics(String dbName, String tableName, List partVals, + ColumnStatistics stats) throws IOException { + byte[] key = getStatisticsKey(dbName, tableName, partVals); + String hbaseTable = getStatisticsTable(partVals); + byte[][] colnames = new byte[stats.getStatsObjSize()][]; + byte[][] serialized = new byte[stats.getStatsObjSize()][]; + for (int i = 0; i < stats.getStatsObjSize(); i++) { + ColumnStatisticsObj obj = stats.getStatsObj().get(i); + serialized[i] = HBaseUtils.serializeStatsForOneColumn(stats, obj); + String colname = obj.getColName(); + colnames[i] = HBaseUtils.buildKey(colname); + } + store(hbaseTable, key, STATS_CF, colnames, serialized); + } + + /** + * Get statistics for a table + * + * @param dbName name of database table is in + * @param tblName name of table + * @param colNames list of column names to get statistics for + * @return column statistics for indicated table + * @throws IOException + */ + ColumnStatistics getTableStatistics(String dbName, String tblName, List colNames) + throws IOException { + byte[] tabKey = HBaseUtils.buildKey(dbName, tblName); + ColumnStatistics tableStats = new ColumnStatistics(); + ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); + statsDesc.setIsTblLevel(true); + statsDesc.setDbName(dbName); + statsDesc.setTableName(tblName); + tableStats.setStatsDesc(statsDesc); + byte[][] colKeys = new byte[colNames.size()][]; + for (int i = 0; i < colKeys.length; i++) { + colKeys[i] = HBaseUtils.buildKey(colNames.get(i)); + } + Result result = read(TABLE_TABLE, tabKey, STATS_CF, colKeys); + for (int i = 0; i < colKeys.length; i++) { + byte[] serializedColStats = result.getValue(STATS_CF, colKeys[i]); + if (serializedColStats == null) { + // There were no stats for this column, so skip it + continue; + } + ColumnStatisticsObj obj = + HBaseUtils.deserializeStatsForOneColumn(tableStats, serializedColStats); + obj.setColName(colNames.get(i)); + tableStats.addToStatsObj(obj); + } + return tableStats; + } + + /** + * Get statistics for a set of partitions + * + * @param dbName name of database table is in + * @param tblName table partitions are in + * @param partNames names of the partitions, used only to set values inside the return stats + * objects + * @param partVals partition values for each partition, needed because this class doesn't know how + * to translate from partName to partVals + * @param colNames column names to fetch stats for. These columns will be fetched for all + * requested partitions + * @return list of ColumnStats, one for each partition for which we found at least one column's + * stats. 
+ * @throws IOException + */ + List getPartitionStatistics(String dbName, String tblName, + List partNames, List> partVals, List colNames) + throws IOException { + List statsList = new ArrayList<>(partNames.size()); + Map, String> valToPartMap = new HashMap<>(partNames.size()); + List gets = new ArrayList<>(partNames.size() * colNames.size()); + assert partNames.size() == partVals.size(); + + byte[][] colNameBytes = new byte[colNames.size()][]; + for (int i = 0; i < colNames.size(); i++) { + colNameBytes[i] = HBaseUtils.buildKey(colNames.get(i)); + } + + for (int i = 0; i < partNames.size(); i++) { + valToPartMap.put(partVals.get(i), partNames.get(i)); + byte[] partKey = HBaseUtils.buildPartitionKey(dbName, tblName, + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()), + partVals.get(i)); + Get get = new Get(partKey); + for (byte[] colName : colNameBytes) { + get.addColumn(STATS_CF, colName); + } + gets.add(get); + } + + HTableInterface htab = conn.getHBaseTable(PART_TABLE); + Result[] results = htab.get(gets); + for (int i = 0; i < results.length; i++) { + ColumnStatistics colStats = null; + for (int j = 0; j < colNameBytes.length; j++) { + byte[] serializedColStats = results[i].getValue(STATS_CF, colNameBytes[j]); + if (serializedColStats != null) { + if (colStats == null) { + // We initialize this late so that we don't create extras in the case of + // partitions with no stats + colStats = new ColumnStatistics(); + statsList.add(colStats); + ColumnStatisticsDesc csd = new ColumnStatisticsDesc(); + + // We need to figure out which partition these call stats are from. To do that we + // recontruct the key. We have to pull the dbName and tableName out of the key to + // find the partition values. + byte[] key = results[i].getRow(); + List reconstructedPartVals = + HBaseUtils.deserializePartitionKey(getTable(dbName, tblName).getPartitionKeys(), key, + staticConf); + String partName = valToPartMap.get(reconstructedPartVals); + assert partName != null; + csd.setIsTblLevel(false); + csd.setDbName(dbName); + csd.setTableName(tblName); + csd.setPartName(partName); + colStats.setStatsDesc(csd); + } + ColumnStatisticsObj cso = + HBaseUtils.deserializeStatsForOneColumn(colStats, serializedColStats); + cso.setColName(colNames.get(j)); + colStats.addToStatsObj(cso); + } + } + } + + return statsList; + } + + /** + * Get a reference to the stats cache. + * @return the stats cache. + */ + StatsCache getStatsCache() { + return statsCache; + } + + /** + * Get aggregated stats. Only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly + * but should call StatsCache.get instead. + * @param key The md5 hash associated with this partition set + * @return stats if hbase has them, else null + * @throws IOException + */ + AggrStats getAggregatedStats(byte[] key) throws IOException{ + byte[] serialized = read(AGGR_STATS_TABLE, key, CATALOG_CF, AGGR_STATS_STATS_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeAggrStats(serialized); + + } + + /** + * Put aggregated stats Only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. Others should not call directly + * but should call StatsCache.put instead. 
+ * @param key The md5 hash associated with this partition set + * @param dbName Database these partitions are in + * @param tableName Table these partitions are in + * @param partNames Partition names + * @param colName Column stats are for + * @param stats Stats + * @throws IOException + */ + void putAggregatedStats(byte[] key, String dbName, String tableName, List partNames, + String colName, AggrStats stats) throws IOException { + // Serialize the part names + List protoNames = new ArrayList<>(partNames.size() + 3); + protoNames.add(dbName); + protoNames.add(tableName); + protoNames.add(colName); + protoNames.addAll(partNames); + // Build a bloom Filter for these partitions + BloomFilter bloom = new BloomFilter(partNames.size(), STATS_BF_ERROR_RATE); + for (String partName : partNames) { + bloom.add(partName.getBytes(HBaseUtils.ENCODING)); + } + byte[] serializedFilter = HBaseUtils.serializeBloomFilter(dbName, tableName, bloom); + + byte[] serializedStats = HBaseUtils.serializeAggrStats(stats); + store(AGGR_STATS_TABLE, key, CATALOG_CF, + new byte[][]{AGGR_STATS_BLOOM_COL, AGGR_STATS_STATS_COL}, + new byte[][]{serializedFilter, serializedStats}); + } + + // TODO - We shouldn't remove an entry from the cache as soon as a single partition is deleted. + // TODO - Instead we should keep track of how many partitions have been deleted and only remove + // TODO - an entry once it passes a certain threshold, like 5%, of partitions have been removed. + // TODO - That requires moving this from a filter to a co-processor. + /** + * Invalidate stats associated with the listed partitions. This method is intended for use + * only by {@link org.apache.hadoop.hive.metastore.hbase.StatsCache}. + * @param filter serialized version of the filter to pass + * @return List of md5 hash keys for the partition stat sets that were removed. + * @throws IOException + */ + List + invalidateAggregatedStats(HbaseMetastoreProto.AggrStatsInvalidatorFilter filter) + throws IOException { + Iterator results = scan(AGGR_STATS_TABLE, new AggrStatsInvalidatorFilter(filter)); + if (!results.hasNext()) return Collections.emptyList(); + List deletes = new ArrayList<>(); + List keys = new ArrayList<>(); + while (results.hasNext()) { + Result result = results.next(); + deletes.add(new Delete(result.getRow())); + keys.add(new StatsCache.StatsCacheKey(result.getRow())); + } + HTableInterface htab = conn.getHBaseTable(AGGR_STATS_TABLE); + htab.delete(deletes); + return keys; + } + + private byte[] getStatisticsKey(String dbName, String tableName, List partVals) throws IOException { + return partVals == null ? HBaseUtils.buildKey(dbName, tableName) : HBaseUtils + .buildPartitionKey(dbName, tableName, + HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), + partVals); + } + + private String getStatisticsTable(List partVals) { + return partVals == null ? TABLE_TABLE : PART_TABLE; + } + + /********************************************************************************************** + * File metadata related methods + *********************************************************************************************/ + + /** + * @param fileIds file ID list. + * @return Serialized file metadata. 
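+   * @throws IOException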
+ */ + ByteBuffer[] getFileMetadata(List fileIds) throws IOException { + byte[][] keys = new byte[fileIds.size()][]; + for (int i = 0; i < fileIds.size(); ++i) { + keys[i] = HBaseUtils.makeLongKey(fileIds.get(i)); + } + ByteBuffer[] result = new ByteBuffer[keys.length]; + multiRead(FILE_METADATA_TABLE, CATALOG_CF, CATALOG_COL, keys, result); + return result; + } + + /** + * @param fileIds file ID list. + * @param metadata Serialized file metadata. + */ + void storeFileMetadata(List fileIds, List metadata) + throws IOException, InterruptedException { + byte[][] keys = new byte[fileIds.size()][]; + for (int i = 0; i < fileIds.size(); ++i) { + keys[i] = HBaseUtils.makeLongKey(fileIds.get(i)); + } + multiModify(FILE_METADATA_TABLE, keys, CATALOG_CF, CATALOG_COL, metadata); + } + + /********************************************************************************************** + * Security related methods + *********************************************************************************************/ + + /** + * Fetch a delegation token + * @param tokId identifier of the token to fetch + * @return the delegation token, or null if there is no such delegation token + * @throws IOException + */ + String getDelegationToken(String tokId) throws IOException { + byte[] key = HBaseUtils.buildKey(tokId); + byte[] serialized = read(SECURITY_TABLE, key, CATALOG_CF, DELEGATION_TOKEN_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeDelegationToken(serialized); + } + + /** + * Get all delegation token ids + * @return list of all delegation token identifiers + * @throws IOException + */ + List scanDelegationTokenIdentifiers() throws IOException { + Iterator iter = scan(SECURITY_TABLE, CATALOG_CF, DELEGATION_TOKEN_COL); + List ids = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + byte[] serialized = result.getValue(CATALOG_CF, DELEGATION_TOKEN_COL); + if (serialized != null) { + // Don't deserialize the value, as what we're after is the key. We just had to check the + // value wasn't null in order to check this is a record with a delegation token and not a + // master key. 
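+        // The row key encodes the token identifier (see getDelegationToken, which builds the
+        // key from tokId), so it can be returned directly.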
+ ids.add(new String(result.getRow(), HBaseUtils.ENCODING)); + + } + } + return ids; + } + + /** + * Store a delegation token + * @param tokId token id + * @param token delegation token to store + * @throws IOException + */ + void putDelegationToken(String tokId, String token) throws IOException { + byte[][] serialized = HBaseUtils.serializeDelegationToken(tokId, token); + store(SECURITY_TABLE, serialized[0], CATALOG_CF, DELEGATION_TOKEN_COL, serialized[1]); + } + + /** + * Delete a delegation token + * @param tokId identifier of token to drop + * @throws IOException + */ + void deleteDelegationToken(String tokId) throws IOException { + byte[] key = HBaseUtils.buildKey(tokId); + delete(SECURITY_TABLE, key, CATALOG_CF, DELEGATION_TOKEN_COL); + } + + /** + * Fetch a master key + * @param seqNo sequence number of the master key + * @return the master key, or null if there is no such master key + * @throws IOException + */ + String getMasterKey(Integer seqNo) throws IOException { + byte[] key = HBaseUtils.buildKey(seqNo.toString()); + byte[] serialized = read(SECURITY_TABLE, key, CATALOG_CF, MASTER_KEY_COL); + if (serialized == null) return null; + return HBaseUtils.deserializeMasterKey(serialized); + } + + /** + * Get all master keys + * @return list of all master keys + * @throws IOException + */ + List scanMasterKeys() throws IOException { + Iterator iter = scan(SECURITY_TABLE, CATALOG_CF, MASTER_KEY_COL); + List keys = new ArrayList<>(); + while (iter.hasNext()) { + Result result = iter.next(); + byte[] serialized = result.getValue(CATALOG_CF, MASTER_KEY_COL); + if (serialized != null) { + keys.add(HBaseUtils.deserializeMasterKey(serialized)); + + } + } + return keys; + } + + /** + * Store a master key + * @param seqNo sequence number + * @param key master key to store + * @throws IOException + */ + void putMasterKey(Integer seqNo, String key) throws IOException { + byte[][] serialized = HBaseUtils.serializeMasterKey(seqNo, key); + store(SECURITY_TABLE, serialized[0], CATALOG_CF, MASTER_KEY_COL, serialized[1]); + } + + /** + * Delete a master key + * @param seqNo sequence number of master key to delete + * @throws IOException + */ + void deleteMasterKey(Integer seqNo) throws IOException { + byte[] key = HBaseUtils.buildKey(seqNo.toString()); + delete(SECURITY_TABLE, key, CATALOG_CF, MASTER_KEY_COL); + } + + /********************************************************************************************** + * Sequence methods + *********************************************************************************************/ + + long getNextSequence(byte[] sequence) throws IOException { + byte[] serialized = read(SEQUENCES_TABLE, SEQUENCES_KEY, CATALOG_CF, sequence); + long val = 0; + if (serialized != null) { + val = Long.valueOf(new String(serialized, HBaseUtils.ENCODING)); + } + byte[] incrSerialized = new Long(val + 1).toString().getBytes(HBaseUtils.ENCODING); + store(SEQUENCES_TABLE, SEQUENCES_KEY, CATALOG_CF, sequence, incrSerialized); + return val; + } + + /********************************************************************************************** + * Cache methods + *********************************************************************************************/ + + /** + * This should be called whenever a new query is started. 
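+   * It flushes the table, storage descriptor, partition, and role caches; when debug logging
+   * is enabled it also dumps and clears the access counters first.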
+ */ + void flushCatalogCache() { + if (LOG.isDebugEnabled()) { + for (Counter counter : counters) { + LOG.debug(counter.dump()); + counter.clear(); + } + statsCache.dumpCounters(); + } + tableCache.flush(); + sdCache.flush(); + partCache.flush(); + flushRoleCache(); + } + + private void flushRoleCache() { + roleCache.clear(); + entireRoleTableInCache = false; + } + + /********************************************************************************************** + * General access methods + *********************************************************************************************/ + + private void store(String table, byte[] key, byte[] colFam, byte[] colName, byte[] obj) + throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Put p = new Put(key); + p.add(colFam, colName, obj); + htab.put(p); + conn.flush(htab); + } + + private void store(String table, byte[] key, byte[] colFam, byte[][] colName, byte[][] obj) + throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Put p = new Put(key); + for (int i = 0; i < colName.length; i++) { + p.add(colFam, colName[i], obj[i]); + } + htab.put(p); + conn.flush(htab); + } + + private byte[] read(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Get g = new Get(key); + g.addColumn(colFam, colName); + Result res = htab.get(g); + return res.getValue(colFam, colName); + } + + private void multiRead(String table, byte[] colFam, byte[] colName, + byte[][] keys, ByteBuffer[] resultDest) throws IOException { + assert keys.length == resultDest.length; + @SuppressWarnings("deprecation") + HTableInterface htab = conn.getHBaseTable(table); + List gets = new ArrayList<>(keys.length); + for (byte[] key : keys) { + Get g = new Get(key); + g.addColumn(colFam, colName); + gets.add(g); + } + Result[] results = htab.get(gets); + for (int i = 0; i < results.length; ++i) { + Result r = results[i]; + resultDest[i] = (r.isEmpty() ? null : r.getValueAsByteBuffer(colFam, colName)); + } + } + + private void multiModify(String table, byte[][] keys, byte[] colFam, + byte[] colName, List values) throws IOException, InterruptedException { + assert values == null || keys.length == values.size(); + // HBase APIs are weird. To supply bytebuffer value, you have to also have bytebuffer + // column name, but not column family. So there. Perhaps we should add these to constants too. + ByteBuffer colNameBuf = ByteBuffer.wrap(colName); + @SuppressWarnings("deprecation") + HTableInterface htab = conn.getHBaseTable(table); + List actions = new ArrayList<>(keys.length); + for (int i = 0; i < keys.length; ++i) { + ByteBuffer value = (values != null) ? values.get(i) : null; + if (value == null) { + actions.add(new Delete(keys[i])); + } else { + Put p = new Put(keys[i]); + p.addColumn(colFam, colNameBuf, HConstants.LATEST_TIMESTAMP, value); + actions.add(p); + } + } + Object[] results = new Object[keys.length]; + htab.batch(actions, results); + // TODO: should we check results array? we don't care about partial results + conn.flush(htab); + } + + private Result read(String table, byte[] key, byte[] colFam, byte[][] colNames) + throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Get g = new Get(key); + for (byte[] colName : colNames) g.addColumn(colFam, colName); + return htab.get(g); + } + + // Delete a row. If colFam and colName are not null, then only the named column will be + // deleted. 
If colName is null and colFam is not, only the named family will be deleted. If + // both are null the entire row will be deleted. + private void delete(String table, byte[] key, byte[] colFam, byte[] colName) throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Delete d = new Delete(key); + if (colName != null) d.deleteColumn(colFam, colName); + else if (colFam != null) d.deleteFamily(colFam); + htab.delete(d); + } + + private Iterator scan(String table, byte[] colFam, byte[] colName) throws IOException { + return scan(table, null, null, colFam, colName, null); + } + + private Iterator scan(String table, byte[] colFam, byte[] colName, + Filter filter) throws IOException { + return scan(table, null, null, colFam, colName, filter); + } + + private Iterator scan(String table, Filter filter) throws IOException { + return scan(table, null, null, null, null, filter); + } + + private Iterator scan(String table, byte[] keyStart, byte[] keyEnd, byte[] colFam, + byte[] colName, Filter filter) throws IOException { + HTableInterface htab = conn.getHBaseTable(table); + Scan s = new Scan(); + if (keyStart != null) { + s.setStartRow(keyStart); + } + if (keyEnd != null) { + s.setStopRow(keyEnd); + } + if (colFam != null && colName != null) { + s.addColumn(colFam, colName); + } + if (filter != null) { + s.setFilter(filter); + } + ResultScanner scanner = htab.getScanner(s); + return scanner.iterator(); + } + + + + /********************************************************************************************** + * Testing methods and classes + *********************************************************************************************/ + + @VisibleForTesting + int countStorageDescriptor() throws IOException { + ResultScanner scanner = conn.getHBaseTable(SD_TABLE).getScanner(new Scan()); + int cnt = 0; + Result r; + do { + r = scanner.next(); + if (r != null) { + LOG.debug("Saw record with hash " + Base64.encodeBase64String(r.getRow())); + cnt++; + } + } while (r != null); + + return cnt; + } + + /** + * Use this for unit testing only, so that a mock connection object can be passed in. + * @param connection Mock connection objecct + */ + @VisibleForTesting + static void setTestConnection(HBaseConnection connection) { + testConn = connection; + } + + + // For testing without the cache + private static class BogusObjectCache extends ObjectCache { + static Counter bogus = new Counter("bogus"); + + BogusObjectCache() { + super(1, bogus, bogus, bogus); + } + + @Override + V get(K key) { + return null; + } + } + + private static class BogusPartitionCache extends PartitionCache { + static Counter bogus = new Counter("bogus"); + + BogusPartitionCache() { + super(1, bogus, bogus, bogus); + } + + @Override + Collection getAllForTable(String dbName, String tableName) { + return null; + } + + @Override + Partition get(String dbName, String tableName, List partVals) { + return null; + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java new file mode 100644 index 0000000..1c407f1 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseSchemaTool.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.thrift.TBase; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.protocol.TSimpleJSONProtocol; +import org.apache.thrift.transport.TMemoryBuffer; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.List; + +/** + * A tool to dump contents from the HBase store in a human readable form + */ +public class HBaseSchemaTool { + + private static String[] commands = {"db", "part", "parts", "role", "table", "function", + "install"}; + + public static void main(String[] args) throws Exception { + Options options = new Options(); + + options.addOption(OptionBuilder + .withLongOpt("column") + .withDescription("Comma separated list of column names") + .hasArg() + .create('c')); + + options.addOption(OptionBuilder + .withLongOpt("db") + .withDescription("Database name") + .hasArg() + .create('d')); + + options.addOption(OptionBuilder + .withLongOpt("function") + .withDescription("Function name") + .hasArg() + .create('f')); + + options.addOption(OptionBuilder + .withLongOpt("help") + .withDescription("You're looking at it") + .create('h')); + + options.addOption(OptionBuilder + .withLongOpt("role") + .withDescription("Role name") + .hasArg() + .create('r')); + + options.addOption(OptionBuilder + .withLongOpt("partvals") + .withDescription("Comma separated list of partition values, in order of partition columns") + .hasArg() + .create('p')); + + options.addOption(OptionBuilder + .withLongOpt("stats") + .withDescription("Get statistics rather than catalog object") + .create('s')); + + options.addOption(OptionBuilder + .withLongOpt("table") + .withDescription("Table name") + .hasArg() + .create('t')); + + CommandLine cli = new GnuParser().parse(options, args); + + if (cli.hasOption('h')) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("hbaseschematool", options); + return; + } + + String[] cmds = cli.getArgs(); + if (cmds.length != 1) { + System.err.print("Must include a cmd, valid cmds are: "); + for (int i = 0; i < commands.length; i++) { + if (i != 0) System.err.print(", "); + System.err.print(commands[i]); + } + System.err.println(); + System.exit(1); + } + String cmd = cmds[0]; + + 
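+    // Collect the option values into fields; the requested command is then dispatched below
+    // by looking up the matching public method via reflection.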
List<String> parts = null; + if (cli.hasOption('p')) { + parts = Arrays.asList(cli.getOptionValue('p').split(",")); + } + + List<String> cols = null; + if (cli.hasOption('c')) { + cols = Arrays.asList(cli.getOptionValue('c').split(",")); + } + + HBaseSchemaTool tool = new HBaseSchemaTool(cli.getOptionValue('d'), cli.getOptionValue('t'), + parts, cli.getOptionValue('f'), cli.getOptionValue('r'), cols, cli.hasOption('s')); + Method method = tool.getClass().getMethod(cmd); + method.invoke(tool); + + + } + + private HBaseReadWrite hrw; + private String dbName; + private String funcName; + private String tableName; + private List<String> partVals; + private String roleName; + private List<String> colNames; + private boolean hasStats; + + private HBaseSchemaTool(String dbname, String tn, List<String> pv, String fn, String rn, + List<String> cn, boolean s) { + dbName = dbname; + tableName = tn; + partVals = pv; + funcName = fn; + roleName = rn; + colNames = cn; + hasStats = s; + hrw = HBaseReadWrite.getInstance(new Configuration()); + } + + public void db() throws IOException, TException { + Database db = hrw.getDb(dbName); + if (db == null) System.err.println("No such database: " + dbName); + else dump(db); + } + + public void install() throws IOException { + HBaseReadWrite.createTablesIfNotExist(); + } + + public void part() throws IOException, TException { + if (hasStats) { + Table table = hrw.getTable(dbName, tableName); + if (table == null) { + System.err.println("No such table: " + dbName + "." + tableName); + return; + } + String partName = HBaseStore.buildExternalPartName(table, partVals); + List<ColumnStatistics> stats = hrw.getPartitionStatistics(dbName, tableName, + Arrays.asList(partName), Arrays.asList(partVals), colNames); + if (stats == null) { + System.err.println("No stats for " + dbName + "." + tableName + "." + + StringUtils.join(partVals, ':')); + } else { + for (ColumnStatistics stat : stats) dump(stat); + } + } else { + Partition part = hrw.getPartition(dbName, tableName, partVals); + if (part == null) { + System.err.println("No such partition: " + dbName + "." + tableName + "." + + StringUtils.join(partVals, ':')); + } else { + dump(part); + } + } + } + + public void parts() throws IOException, TException { + List<Partition> parts = hrw.scanPartitionsInTable(dbName, tableName, -1); + if (parts == null) { + System.err.println("No such table: " + dbName + "." + tableName); + } else { + for (Partition p : parts) dump(p); + } + } + + public void role() throws IOException, TException { + Role role = hrw.getRole(roleName); + if (role == null) System.err.println("No such role: " + roleName); + else dump(role); + } + + public void table() throws IOException, TException { + if (hasStats) { + ColumnStatistics stats = hrw.getTableStatistics(dbName, tableName, colNames); + if (stats == null) System.err.println("No stats for " + dbName + "." + tableName); + else dump(stats); + } else { + Table table = hrw.getTable(dbName, tableName); + if (table == null) System.err.println("No such table: " + dbName + "." + tableName); + else dump(table); + } + } + + public void function() throws IOException, TException { + Function func = hrw.getFunction(dbName, funcName); + if (func == null) System.err.println("No such function: " + dbName + "."
+ funcName); + else dump(func); + } + + private void dump(TBase thriftObj) throws TException { + TMemoryBuffer buf = new TMemoryBuffer(1000); + TProtocol protocol = new TSimpleJSONProtocol(buf); + thriftObj.write(protocol); + System.out.println(new String(buf.getArray())); + } + + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java new file mode 100644 index 0000000..df0fac3 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -0,0 +1,2387 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheLoader; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.PartFilterExprUtil; +import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import 
org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.thrift.TException; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +/** + * Implementation of RawStore that stores data in HBase + */ +public class HBaseStore implements RawStore { + static final private Log LOG = LogFactory.getLog(HBaseStore.class.getName()); + + // Do not access this directly, call getHBase to make sure it is initialized. + private HBaseReadWrite hbase = null; + private Configuration conf; + private int txnNestLevel = 0; + private PartitionExpressionProxy expressionProxy = null; + + public HBaseStore() { + } + + @Override + public void shutdown() { + try { + if (txnNestLevel != 0) rollbackTransaction(); + getHBase().close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public boolean openTransaction() { + if (txnNestLevel++ <= 0) { + LOG.debug("Opening HBase transaction"); + getHBase().begin(); + txnNestLevel = 1; + } + return true; + } + + @Override + public boolean commitTransaction() { + if (--txnNestLevel == 0) { + LOG.debug("Committing HBase transaction"); + getHBase().commit(); + } + return true; + } + + @Override + public void rollbackTransaction() { + txnNestLevel = 0; + LOG.debug("Rolling back HBase transaction"); + getHBase().rollback(); + } + + @Override + public void createDatabase(Database db) throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + Database dbCopy = db.deepCopy(); + dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName())); + // HiveMetaStore already checks for existence of the database, don't recheck + getHBase().putDb(dbCopy); + commit = true; + } catch (IOException e) { + LOG.error("Unable to create database ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + + } + + @Override + public Database getDatabase(String name) throws NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + Database db = getHBase().getDb(HiveStringUtils.normalizeIdentifier(name)); + if (db == null) { + throw new NoSuchObjectException("Unable to find db " + name); + } + commit = true; + return db; + } catch (IOException e) { + LOG.error("Unable to get db", 
e); + throw new NoSuchObjectException("Error reading db " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + getHBase().deleteDb(HiveStringUtils.normalizeIdentifier(dbname)); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete db" + e); + throw new MetaException("Unable to drop database " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, + MetaException { + // ObjectStore fetches the old db before updating it, but I can't see the possible value of + // that since the caller will have needed to call getDatabase to have the db object. + boolean commit = false; + openTransaction(); + try { + Database dbCopy = db.deepCopy(); + dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName())); + getHBase().putDb(dbCopy); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to alter database ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getDatabases(String pattern) throws MetaException { + boolean commit = false; + openTransaction(); + try { + List dbs = getHBase().scanDatabases( + pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern))); + List dbNames = new ArrayList(dbs.size()); + for (Database db : dbs) dbNames.add(db.getName()); + commit = true; + return dbNames; + } catch (IOException e) { + LOG.error("Unable to get databases ", e); + throw new MetaException("Unable to get databases, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getAllDatabases() throws MetaException { + return getDatabases(null); + } + + @Override + public boolean createType(Type type) { + throw new UnsupportedOperationException(); + } + + @Override + public Type getType(String typeName) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropType(String typeName) { + throw new UnsupportedOperationException(); + } + + @Override + public void createTable(Table tbl) throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + // HiveMetaStore above us checks if the table already exists, so we can blindly store it here. 
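getDatabases (and getTables below) filter names by converting a metastore LIKE-style pattern into a regex via likeToRegex, which is defined elsewhere in this file and not visible in this hunk. A rough, hypothetical sketch of the kind of conversion it presumably performs, assuming the usual SHOW DATABASES semantics where '*' matches any run of characters and '|' separates alternatives:

    import java.util.regex.Pattern;

    public class LikeToRegexSketch {
      // Hypothetical stand-in for HBaseStore.likeToRegex (not shown in this hunk):
      // '*' becomes '.*', '|' stays as regex alternation, everything else is quoted.
      static String likeToRegex(String pattern) {
        StringBuilder sb = new StringBuilder();
        for (char c : pattern.toCharArray()) {
          if (c == '*') sb.append(".*");
          else if (c == '|') sb.append('|');
          else sb.append(Pattern.quote(String.valueOf(c)));
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        String regex = likeToRegex("def*|test_db");
        System.out.println("default".matches(regex));  // true
        System.out.println("prod".matches(regex));     // false
      }
    }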
+ try { + Table tblCopy = tbl.deepCopy(); + tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(tblCopy.getDbName())); + tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblCopy.getTableName())); + getHBase().putTable(tblCopy); + commit = true; + } catch (IOException e) { + LOG.error("Unable to create table ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean dropTable(String dbName, String tableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException { + boolean commit = false; + openTransaction(); + try { + getHBase().deleteTable(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName)); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete db" + e); + throw new MetaException("Unable to drop table " + tableNameForErrorMsg(dbName, tableName)); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public Table getTable(String dbName, String tableName) throws MetaException { + boolean commit = false; + openTransaction(); + try { + Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName)); + if (table == null) { + LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName)); + } + commit = true; + return table; + } catch (IOException e) { + LOG.error("Unable to get table", e); + throw new MetaException("Error reading table " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + Partition partCopy = part.deepCopy(); + partCopy.setDbName(HiveStringUtils.normalizeIdentifier(part.getDbName())); + partCopy.setTableName(HiveStringUtils.normalizeIdentifier(part.getTableName())); + getHBase().putPartition(partCopy); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to add partition", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean addPartitions(String dbName, String tblName, List parts) + throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + List partsCopy = new ArrayList(); + for (int i=0;i part_vals) throws + MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + Partition part = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName), part_vals); + if (part == null) { + throw new NoSuchObjectException("Unable to find partition " + + partNameForErrorMsg(dbName, tableName, part_vals)); + } + commit = true; + return part; + } catch (IOException e) { + LOG.error("Unable to get partition", e); + throw new MetaException("Error reading partition " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean doesPartitionExist(String dbName, String tableName, List part_vals) throws + MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + boolean exists = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName), part_vals) != null; + commit = true; + 
return exists; + } catch (IOException e) { + LOG.error("Unable to get partition", e); + throw new MetaException("Error reading partition " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean dropPartition(String dbName, String tableName, List part_vals) throws + MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + boolean commit = false; + openTransaction(); + try { + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = HiveStringUtils.normalizeIdentifier(tableName); + getHBase().deletePartition(dbName, tableName, HBaseUtils.getPartitionKeyTypes( + getTable(dbName, tableName).getPartitionKeys()), part_vals); + // Drop any cached stats that reference this partitions + getHBase().getStatsCache().invalidate(dbName, tableName, + buildExternalPartName(dbName, tableName, part_vals)); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete db" + e); + throw new MetaException("Unable to drop partition " + partNameForErrorMsg(dbName, tableName, + part_vals)); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getPartitions(String dbName, String tableName, int max) throws + MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName), max); + commit = true; + return parts; + } catch (IOException e) { + LOG.error("Unable to get partitions", e); + throw new MetaException("Error scanning partitions"); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void alterTable(String dbName, String tableName, Table newTable) throws InvalidObjectException, + MetaException { + boolean commit = false; + openTransaction(); + try { + Table newTableCopy = newTable.deepCopy(); + newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName())); + List oldPartTypes = getTable(dbName, tableName).getPartitionKeys()==null? + null:HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()); + newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName())); + getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy); + if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0 + && !tableName.equals(newTable.getTableName())) { + // They renamed the table, so we need to change each partition as well, since it changes + // the key. 
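The rename branch in alterTable below rewrites every partition because the partition's HBase row key embeds the table name, so renaming the table invalidates all existing partition keys. A toy illustration of why a composite key scheme behaves this way; the key layout used here is an assumption for illustration only, not the actual HBaseUtils encoding:

    import java.util.Arrays;
    import java.util.List;

    public class PartitionKeySketch {
      // Illustrative only: pretend the row key is db, table, and partition values
      // joined with a separator. The real encoding lives in HBaseUtils.
      static String rowKey(String db, String table, List<String> partVals) {
        return db + '\u0000' + table + '\u0000' + String.join("\u0000", partVals);
      }

      public static void main(String[] args) {
        List<String> vals = Arrays.asList("2015", "08");
        String before = rowKey("default", "web_logs", vals);
        String after  = rowKey("default", "web_logs_renamed", vals);
        // Different keys: the old rows can no longer be found under the new table
        // name, which is why alterTable re-writes each partition on rename.
        System.out.println(before.equals(after));  // false
      }
    }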
+ try { + List oldParts = getPartitions(dbName, tableName, -1); + List newParts = new ArrayList<>(oldParts.size()); + for (Partition oldPart : oldParts) { + Partition newPart = oldPart.deepCopy(); + newPart.setTableName(newTable.getTableName()); + newParts.add(newPart); + } + getHBase().replacePartitions(oldParts, newParts, oldPartTypes); + } catch (NoSuchObjectException e) { + LOG.debug("No partitions found for old table so not worrying about it"); + } + + } + commit = true; + } catch (IOException e) { + LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e); + throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbName, tableName)); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getTables(String dbName, String pattern) throws MetaException { + boolean commit = false; + openTransaction(); + try { + List
<Table> tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName), + pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern))); + List<String> tableNames = new ArrayList<String>(tables.size()); + for (Table table : tables) tableNames.add(table.getTableName()); + commit = true; + return tableNames; + } catch (IOException e) { + LOG.error("Unable to get tables ", e); + throw new MetaException("Unable to get tables, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List<Table>
getTableObjectsByName(String dbname, List<String> tableNames) throws + MetaException, UnknownDBException { + boolean commit = false; + openTransaction(); + try { + List<String> normalizedTableNames = new ArrayList<String>(tableNames.size()); + for (String tableName : tableNames) { + normalizedTableNames.add(HiveStringUtils.normalizeIdentifier(tableName)); + } + List<Table>
tables = getHBase().getTables(HiveStringUtils.normalizeIdentifier(dbname), + normalizedTableNames); + commit = true; + return tables; + } catch (IOException e) { + LOG.error("Unable to get tables ", e); + throw new MetaException("Unable to get tables, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getAllTables(String dbName) throws MetaException { + return getTables(dbName, null); + } + + @Override + public List listTableNamesByFilter(String dbName, String filter, short max_tables) throws + MetaException, UnknownDBException { + // TODO needs to wait until we support pushing filters into HBase. + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionNames(String db_name, String tbl_name, short max_parts) throws + MetaException { + boolean commit = false; + openTransaction(); + try { + List parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), max_parts); + if (parts == null) return null; + List names = new ArrayList(parts.size()); + Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name)); + for (Partition p : parts) { + names.add(buildExternalPartName(table, p)); + } + commit = true; + return names; + } catch (IOException e) { + LOG.error("Unable to get partitions", e); + throw new MetaException("Error scanning partitions"); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, + short max_parts) throws MetaException { + // TODO needs to wait until we support pushing filters into HBase. + throw new UnsupportedOperationException(); + } + + @Override + public void alterPartition(String db_name, String tbl_name, List part_vals, + Partition new_part) throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + Partition new_partCopy = new_part.deepCopy(); + new_partCopy.setDbName(HiveStringUtils.normalizeIdentifier(new_partCopy.getDbName())); + new_partCopy.setTableName(HiveStringUtils.normalizeIdentifier(new_partCopy.getTableName())); + Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), part_vals); + getHBase().replacePartition(oldPart, new_partCopy, HBaseUtils.getPartitionKeyTypes( + getTable(db_name, tbl_name).getPartitionKeys())); + // Drop any cached stats that reference this partitions + getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), + buildExternalPartName(db_name, tbl_name, part_vals)); + commit = true; + } catch (IOException e) { + LOG.error("Unable to add partition", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void alterPartitions(String db_name, String tbl_name, List> part_vals_list, + List new_parts) throws InvalidObjectException, + MetaException { + boolean commit = false; + openTransaction(); + try { + List new_partsCopy = new ArrayList(); + for (int i=0;i oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), + HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name), + 
HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list); + getHBase().replacePartitions(oldParts, new_partsCopy, HBaseUtils.getPartitionKeyTypes( + getTable(db_name, tbl_name).getPartitionKeys())); + for (List part_vals : part_vals_list) { + getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), + buildExternalPartName(db_name, tbl_name, part_vals)); + } + commit = true; + } catch (IOException e) { + LOG.error("Unable to add partition", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean addIndex(Index index) throws InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public Index getIndex(String dbName, String origTableName, String indexName) throws + MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropIndex(String dbName, String origTableName, String indexName) throws + MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public List getIndexes(String dbName, String origTableName, int max) throws MetaException { + // TODO - Index not currently supported. But I need to return an empty list or else drop + // table cores. + return new ArrayList(); + } + + @Override + public List listIndexNames(String dbName, String origTableName, short max) throws + MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws + InvalidObjectException, MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public List getPartitionsByFilter(String dbName, String tblName, String filter, + short maxParts) throws MetaException, + NoSuchObjectException { + final ExpressionTree exprTree = (filter != null && !filter.isEmpty()) ? PartFilterExprUtil + .getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; + List result = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName), + HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result); + return result; + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + String defaultPartitionName, short maxParts, + List result) throws TException { + final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tblName = HiveStringUtils.normalizeIdentifier(tblName); + Table table = getTable(dbName, tblName); + boolean commit = false; + openTransaction(); + try { + if (exprTree == null) { + List partNames = new LinkedList(); + boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn( + table, expr, defaultPartitionName, maxParts, partNames); + result.addAll(getPartitionsByNames(dbName, tblName, partNames)); + return hasUnknownPartitions; + } else { + return getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result); + } + } finally { + commitOrRoleBack(commit); + } + } + + /** + * Gets the partition names from a table, pruned using an expression. + * @param table Table. + * @param expr Expression. + * @param defaultPartName Default partition name from job config, if any. 
+ * @param maxParts Maximum number of partition names to return. + * @param result The resulting names. + * @return Whether the result contains any unknown partitions. + * @throws NoSuchObjectException + */ + private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, + String defaultPartName, short maxParts, List result) throws MetaException, NoSuchObjectException { + List parts = getPartitions( + table.getDbName(), table.getTableName(), maxParts); + for (Partition part : parts) { + result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); + } + List columnNames = new ArrayList(); + List typeInfos = new ArrayList(); + for (FieldSchema fs : table.getPartitionKeys()) { + columnNames.add(fs.getName()); + typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType())); + } + if (defaultPartName == null || defaultPartName.isEmpty()) { + defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + } + return expressionProxy.filterPartitionsByExpr( + columnNames, typeInfos, expr, defaultPartName, result); + } + + private boolean getPartitionsByExprInternal(String dbName, String tblName, + ExpressionTree exprTree, short maxParts, List result) throws MetaException, + NoSuchObjectException { + + dbName = HiveStringUtils.normalizeIdentifier(dbName); + tblName = HiveStringUtils.normalizeIdentifier(tblName); + Table table = getTable(dbName, tblName); + if (table == null) { + throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName); + } + // general hbase filter plan from expression tree + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys()); + if (LOG.isDebugEnabled()) { + LOG.debug("Hbase Filter Plan generated : " + planRes.plan); + } + + // results from scans need to be merged as there can be overlapping results between + // the scans. Use a map of list of partition values to partition for this. 
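As the comment above notes, the per-ScanPlan scans can return overlapping partitions, so the loop that follows deduplicates them by keying a map on each partition's value list and stops once maxParts distinct partitions have been collected. A standalone sketch of that merge, using plain lists of values in place of Partition objects:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class ScanMergeSketch {
      public static void main(String[] args) {
        // Results of two hypothetical scans with one overlapping row.
        List<List<String>> scan1 = Arrays.asList(
            Arrays.asList("2015", "01"), Arrays.asList("2015", "02"));
        List<List<String>> scan2 = Arrays.asList(
            Arrays.asList("2015", "02"), Arrays.asList("2015", "03"));

        int maxParts = 10;
        // List.equals/hashCode are element-wise, so value lists work as map keys.
        Map<List<String>, String> merged = new LinkedHashMap<>();
        for (List<List<String>> scan : Arrays.asList(scan1, scan2)) {
          for (List<String> partVals : scan) {
            merged.put(partVals, "partition " + partVals);
            if (merged.size() >= maxParts) break;
          }
        }
        System.out.println(merged.size());  // 3: the duplicate was collapsed
      }
    }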
+ Map, Partition> mergedParts = new HashMap, Partition>(); + for (ScanPlan splan : planRes.plan.getPlans()) { + try { + List parts = getHBase().scanPartitions(dbName, tblName, + splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()), + splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()), + splan.getFilter(table.getPartitionKeys()), -1); + boolean reachedMax = false; + for (Partition part : parts) { + mergedParts.put(part.getValues(), part); + if (mergedParts.size() == maxParts) { + reachedMax = true; + break; + } + } + if (reachedMax) { + break; + } + } catch (IOException e) { + LOG.error("Unable to get partitions", e); + throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName) + + ": " + e); + } + } + for (Entry, Partition> mp : mergedParts.entrySet()) { + result.add(mp.getValue()); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Matched partitions " + result); + } + + // return true if there might be some additional partitions that don't match filter conditions + // being returned + return !planRes.hasUnsupportedCondition; + } + + @Override + public List getPartitionsByNames(String dbName, String tblName, + List partNames) throws MetaException, + NoSuchObjectException { + List parts = new ArrayList(); + for (String partName : partNames) { + parts.add(getPartition(dbName, tblName, partNameToVals(partName))); + } + return parts; + } + + @Override + public Table markPartitionForEvent(String dbName, String tblName, Map partVals, + PartitionEventType evtType) throws MetaException, + UnknownTableException, InvalidPartitionException, UnknownPartitionException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isPartitionMarkedForEvent(String dbName, String tblName, + Map partName, + PartitionEventType evtType) throws MetaException, + UnknownTableException, InvalidPartitionException, UnknownPartitionException { + throw new UnsupportedOperationException(); + } + + /* + * The design for roles. Roles are a pain because of their hierarchical nature. When a user + * comes in and we need to be able to determine all roles he is a part of, we do not want to + * have to walk the hierarchy in the database. This means we need to flatten the role map for + * each user. But we also have to track how the roles are connected for each user, in case one + * role is revoked from another (e.g. if role1 is included in role2 but then revoked + * from it and user1 was granted both role2 and role1 we cannot remove user1 from role1 + * because he was granted that separately). + * + * We want to optimize for the read case and put the cost on grant and revoke of roles, since + * we assume that is the less common case. So we lay out the roles data as follows: + * + * There is a ROLES table that records each role, plus what other principals have been granted + * into it, along with the info on grantor, etc. + * + * There is a USER_TO_ROLES table that contains the mapping of each user to every role he is a + * part of. + * + * This makes determining what roles a user participates in very quick, as USER_TO_ROLE is a + * simple list for each user. It makes granting users into roles expensive, and granting roles + * into roles very expensive. Each time a user is granted into a role, we need to walk the + * hierarchy in the role table (which means moving through that table multiple times) to + * determine every role the user participates in. 
Each a role is granted into another role + * this hierarchical walk must be done for every principal in the role being granted into. To + * mitigate this pain somewhat whenever doing these mappings we cache the entire ROLES table in + * memory since we assume it is not large. + * + * On a related note, whenever a role is dropped we must walk not only all these role tables + * above (equivalent to a role being revoked from another role, since we have to rebuilding + * mappings for any users in roles that contained that role and any users directly in that + * role), but we also have to remove all the privileges associated with that role directly. + * That means a walk of the DBS table and of the TBLS table. + */ + + @Override + public boolean addRole(String roleName, String ownerName) throws InvalidObjectException, + MetaException, NoSuchObjectException { + int now = (int)(System.currentTimeMillis()/1000); + Role role = new Role(roleName, now, ownerName); + boolean commit = false; + openTransaction(); + try { + if (getHBase().getRole(roleName) != null) { + throw new InvalidObjectException("Role " + roleName + " already exists"); + } + getHBase().putRole(role); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to create role ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + Set usersInRole = getHBase().findAllUsersInRole(roleName); + getHBase().deleteRole(roleName); + getHBase().removeRoleGrants(roleName); + for (String user : usersInRole) { + getHBase().buildRoleMapForUser(user); + } + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to delete role" + e); + throw new MetaException("Unable to drop role " + roleName); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) + throws MetaException, NoSuchObjectException, InvalidObjectException { + boolean commit = false; + openTransaction(); + try { + Set usersToRemap = findUsersToRemapRolesFor(role, userName, principalType); + HbaseMetastoreProto.RoleGrantInfo.Builder builder = + HbaseMetastoreProto.RoleGrantInfo.newBuilder(); + if (userName != null) builder.setPrincipalName(userName); + if (principalType != null) { + builder.setPrincipalType(HBaseUtils.convertPrincipalTypes(principalType)); + } + builder.setAddTime((int)(System.currentTimeMillis() / 1000)); + if (grantor != null) builder.setGrantor(grantor); + if (grantorType != null) { + builder.setGrantorType(HBaseUtils.convertPrincipalTypes(grantorType)); + } + builder.setGrantOption(grantOption); + + getHBase().addPrincipalToRole(role.getRoleName(), builder.build()); + for (String user : usersToRemap) { + getHBase().buildRoleMapForUser(user); + } + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to grant role", e); + throw new MetaException("Unable to grant role " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean grantOption) throws MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + // This can have a couple of different meanings. 
If grantOption is true, then this is only + // revoking the grant option, the role itself doesn't need to be removed. If it is false + // then we need to remove the userName from the role altogether. + try { + if (grantOption) { + // If this is a grant only change, we don't need to rebuild the user mappings. + getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption); + } else { + Set usersToRemap = findUsersToRemapRolesFor(role, userName, principalType); + getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption); + for (String user : usersToRemap) { + getHBase().buildRoleMapForUser(user); + } + } + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to revoke role " + role.getRoleName() + " from " + userName, e); + throw new MetaException("Unable to revoke role " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + PrincipalPrivilegeSet global = getHBase().getGlobalPrivs(); + if (global == null) return null; + List pgi; + if (global.getUserPrivileges() != null) { + pgi = global.getUserPrivileges().get(userName); + if (pgi != null) { + pps.putToUserPrivileges(userName, pgi); + } + } + + if (global.getRolePrivileges() != null) { + List roles = getHBase().getUserRoles(userName); + if (roles != null) { + for (String role : roles) { + pgi = global.getRolePrivileges().get(role); + if (pgi != null) { + pps.putToRolePrivileges(role, pgi); + } + } + } + } + commit = true; + return pps; + } catch (IOException e) { + LOG.error("Unable to get db privileges for user", e); + throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + List groupNames) + throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + Database db = getHBase().getDb(dbName); + if (db.getPrivileges() != null) { + List pgi; + // Find the user privileges for this db + if (db.getPrivileges().getUserPrivileges() != null) { + pgi = db.getPrivileges().getUserPrivileges().get(userName); + if (pgi != null) { + pps.putToUserPrivileges(userName, pgi); + } + } + + if (db.getPrivileges().getRolePrivileges() != null) { + List roles = getHBase().getUserRoles(userName); + if (roles != null) { + for (String role : roles) { + pgi = db.getPrivileges().getRolePrivileges().get(role); + if (pgi != null) { + pps.putToRolePrivileges(role, pgi); + } + } + } + } + } + commit = true; + return pps; + } catch (IOException e) { + LOG.error("Unable to get db privileges for user", e); + throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet(); + Table table = getHBase().getTable(dbName, tableName); + List pgi; + if (table.getPrivileges() != null) { + if 
(table.getPrivileges().getUserPrivileges() != null) { + pgi = table.getPrivileges().getUserPrivileges().get(userName); + if (pgi != null) { + pps.putToUserPrivileges(userName, pgi); + } + } + + if (table.getPrivileges().getRolePrivileges() != null) { + List roles = getHBase().getUserRoles(userName); + if (roles != null) { + for (String role : roles) { + pgi = table.getPrivileges().getRolePrivileges().get(role); + if (pgi != null) { + pps.putToRolePrivileges(role, pgi); + } + } + } + } + } + commit = true; + return pps; + } catch (IOException e) { + LOG.error("Unable to get db privileges for user", e); + throw new MetaException("Unable to get db privileges for user, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + String partition, String userName, + List groupNames) throws + InvalidObjectException, MetaException { + // We don't support partition privileges + return null; + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, + String partitionName, String columnName, + String userName, + List groupNames) throws + InvalidObjectException, MetaException { + // We don't support column level privileges + return null; + } + + @Override + public List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { + List grants; + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), principalName, principalType, pgi)); + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPrincipalDBGrants(String principalName, + PrincipalType principalType, + String dbName) { + List grants; + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + Database db = getHBase().getDb(dbName); + if (db == null) return privileges; + PrincipalPrivilegeSet pps = db.getPrivileges(); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, + null, null, null), principalName, principalType, pgi)); + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List 
listAllTableGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName) { + List grants; + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + Table table = getHBase().getTable(dbName, tableName); + if (table == null) return privileges; + PrincipalPrivilegeSet pps = table.getPrivileges(); + if (pps == null) return privileges; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + if (map == null) return privileges; + grants = map.get(principalName); + + if (grants == null || grants.size() == 0) return privileges; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), principalName, principalType, pgi)); + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + List partValues, + String partName) { + // We don't support partition grants + return new ArrayList(); + } + + @Override + public List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, + String dbName, String tableName, + String columnName) { + // We don't support column grants + return new ArrayList(); + } + + @Override + public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + List partVals, + String partName, + String columnName) { + // We don't support column grants + return new ArrayList(); + } + + @Override + public boolean grantPrivileges(PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + // Locate the right object to deal with + PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv); + + // Now, let's see if we've already got this privilege + for (PrivilegeGrantInfo info : privilegeInfo.grants) { + if (info.getPrivilege().equals(priv.getGrantInfo().getPrivilege())) { + throw new InvalidObjectException(priv.getPrincipalName() + " already has " + + priv.getGrantInfo().getPrivilege() + " on " + privilegeInfo.typeErrMsg); + } + } + privilegeInfo.grants.add(priv.getGrantInfo()); + + writeBackGrantOrRevoke(priv, privilegeInfo); + } + commit = true; + return true; + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws + InvalidObjectException, MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv); + + for (int i = 0; i < privilegeInfo.grants.size(); i++) { + if (privilegeInfo.grants.get(i).getPrivilege().equals( + priv.getGrantInfo().getPrivilege())) { + if (grantOption) privilegeInfo.grants.get(i).setGrantOption(false); + else privilegeInfo.grants.remove(i); + break; + } + } + writeBackGrantOrRevoke(priv, privilegeInfo); + } + commit = true; + return true; 
+ } finally { + commitOrRoleBack(commit); + } + } + + private static class PrivilegeInfo { + Database db; + Table table; + List grants; + String typeErrMsg; + PrincipalPrivilegeSet privSet; + } + + private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege) + throws MetaException, NoSuchObjectException, InvalidObjectException { + PrivilegeInfo result = new PrivilegeInfo(); + switch (privilege.getHiveObject().getObjectType()) { + case GLOBAL: + try { + result.privSet = createOnNull(getHBase().getGlobalPrivs()); + } catch (IOException e) { + LOG.error("Unable to fetch global privileges", e); + throw new MetaException("Unable to fetch global privileges, " + e.getMessage()); + } + result.typeErrMsg = "global"; + break; + + case DATABASE: + result.db = getDatabase(privilege.getHiveObject().getDbName()); + result.typeErrMsg = "database " + result.db.getName(); + result.privSet = createOnNull(result.db.getPrivileges()); + break; + + case TABLE: + result.table = getTable(privilege.getHiveObject().getDbName(), + privilege.getHiveObject().getObjectName()); + result.typeErrMsg = "table " + result.table.getTableName(); + result.privSet = createOnNull(result.table.getPrivileges()); + break; + + case PARTITION: + case COLUMN: + throw new RuntimeException("HBase metastore does not support partition or column " + + "permissions"); + + default: + throw new RuntimeException("Woah bad, unknown object type " + + privilege.getHiveObject().getObjectType()); + } + + // Locate the right PrivilegeGrantInfo + Map> grantInfos; + switch (privilege.getPrincipalType()) { + case USER: + grantInfos = result.privSet.getUserPrivileges(); + result.typeErrMsg = "user"; + break; + + case GROUP: + throw new RuntimeException("HBase metastore does not support group permissions"); + + case ROLE: + grantInfos = result.privSet.getRolePrivileges(); + result.typeErrMsg = "role"; + break; + + default: + throw new RuntimeException("Woah bad, unknown principal type " + + privilege.getPrincipalType()); + } + + // Find the requested name in the grantInfo + result.grants = grantInfos.get(privilege.getPrincipalName()); + if (result.grants == null) { + // Means we don't have any grants for this user yet. + result.grants = new ArrayList(); + grantInfos.put(privilege.getPrincipalName(), result.grants); + } + return result; + } + + private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) { + // If this is the first time a user has been granted a privilege set will be null. 
+ if (pps == null) { + pps = new PrincipalPrivilegeSet(); + } + if (pps.getUserPrivileges() == null) { + pps.setUserPrivileges(new HashMap>()); + } + if (pps.getRolePrivileges() == null) { + pps.setRolePrivileges(new HashMap>()); + } + return pps; + } + + private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi) + throws MetaException, NoSuchObjectException, InvalidObjectException { + // Now write it back + switch (priv.getHiveObject().getObjectType()) { + case GLOBAL: + try { + getHBase().putGlobalPrivs(pi.privSet); + } catch (IOException e) { + LOG.error("Unable to write global privileges", e); + throw new MetaException("Unable to write global privileges, " + e.getMessage()); + } + break; + + case DATABASE: + pi.db.setPrivileges(pi.privSet); + alterDatabase(pi.db.getName(), pi.db); + break; + + case TABLE: + pi.table.setPrivileges(pi.privSet); + alterTable(pi.table.getDbName(), pi.table.getTableName(), pi.table); + break; + + default: + throw new RuntimeException("Dude, you missed the second switch!"); + } + } + + @Override + public Role getRole(String roleName) throws NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + Role role = getHBase().getRole(roleName); + if (role == null) { + throw new NoSuchObjectException("Unable to find role " + roleName); + } + commit = true; + return role; + } catch (IOException e) { + LOG.error("Unable to get role", e); + throw new NoSuchObjectException("Error reading table " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listRoleNames() { + boolean commit = false; + openTransaction(); + try { + List roles = getHBase().scanRoles(); + List roleNames = new ArrayList(roles.size()); + for (Role role : roles) roleNames.add(role.getRoleName()); + commit = true; + return roleNames; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listRoles(String principalName, PrincipalType principalType) { + List roles = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + try { + roles.addAll(getHBase().getPrincipalDirectRoles(principalName, principalType)); + } catch (IOException e) { + throw new RuntimeException(e); + } + // Add the public role if this is a user + if (principalType == PrincipalType.USER) { + roles.add(new Role(HiveMetaStore.PUBLIC, 0, null)); + } + commit = true; + return roles; + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { + boolean commit = false; + openTransaction(); + try { + List roles = listRoles(principalName, principalType); + List rpgs = new ArrayList(roles.size()); + for (Role role : roles) { + HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName()); + if (grants != null) { + for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) { + if (grant.getPrincipalType() == HBaseUtils.convertPrincipalTypes(principalType) && + grant.getPrincipalName().equals(principalName)) { + rpgs.add(new RolePrincipalGrant(role.getRoleName(), principalName, principalType, + grant.getGrantOption(), (int) grant.getAddTime(), grant.getGrantor(), + HBaseUtils.convertPrincipalTypes(grant.getGrantorType()))); + } + } + } + } + commit = true; + return rpgs; + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List 
listRoleMembers(String roleName) { + boolean commit = false; + openTransaction(); + try { + HbaseMetastoreProto.RoleGrantInfoList gil = getHBase().getRolePrincipals(roleName); + List roleMaps = new ArrayList(gil.getGrantInfoList().size()); + for (HbaseMetastoreProto.RoleGrantInfo giw : gil.getGrantInfoList()) { + roleMaps.add(new RolePrincipalGrant(roleName, giw.getPrincipalName(), + HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()), + giw.getGrantOption(), (int)giw.getAddTime(), giw.getGrantor(), + HBaseUtils.convertPrincipalTypes(giw.getGrantorType()))); + } + commit = true; + return roleMaps; + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, + String user_name, List group_names) + throws MetaException, NoSuchObjectException, InvalidObjectException { + // We don't do authorization checks for partitions. + return getPartition(dbName, tblName, partVals); + } + + @Override + public List getPartitionsWithAuth(String dbName, String tblName, short maxParts, + String userName, List groupNames) + throws MetaException, NoSuchObjectException, InvalidObjectException { + // We don't do authorization checks for partitions. + return getPartitions(dbName, tblName, maxParts); + } + + @Override + public List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, + short max_parts) + throws MetaException, NoSuchObjectException { + List parts = + listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null); + List partNames = new ArrayList(parts.size()); + for (Partition part : parts) { + partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues())); + } + return partNames; + } + + + @Override + public List listPartitionsPsWithAuth(String db_name, String tbl_name, + List part_vals, short max_parts, + String userName, List groupNames) + throws MetaException, NoSuchObjectException { + // We don't handle auth info with partitions + boolean commit = false; + openTransaction(); + try { + List parts = getHBase().scanPartitions(HiveStringUtils.normalizeIdentifier(db_name), + HiveStringUtils.normalizeIdentifier(tbl_name), part_vals, max_parts); + commit = true; + return parts; + } catch (IOException e) { + LOG.error("Unable to list partition names", e); + throw new MetaException("Failed to list part names, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws + NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + boolean commit = false; + openTransaction(); + try { + getHBase().updateStatistics(colStats.getStatsDesc().getDbName(), + colStats.getStatsDesc().getTableName(), null, colStats); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to update column statistics", e); + throw new MetaException("Failed to update column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, + List partVals) throws + NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + boolean commit = false; + openTransaction(); + try { + getHBase().updateStatistics(statsObj.getStatsDesc().getDbName(), + statsObj.getStatsDesc().getTableName(), 
partVals, statsObj); + // We need to invalidate aggregates that include this partition + getHBase().getStatsCache().invalidate(statsObj.getStatsDesc().getDbName(), + statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName()); + commit = true; + return true; + } catch (IOException e) { + LOG.error("Unable to update column statistics", e); + throw new MetaException("Failed to update column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + List colName) throws MetaException, + NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + ColumnStatistics cs = getHBase().getTableStatistics(dbName, tableName, colName); + commit = true; + return cs; + } catch (IOException e) { + LOG.error("Unable to fetch column statistics", e); + throw new MetaException("Failed to fetch column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getPartitionColumnStatistics(String dbName, String tblName, + List partNames, List colNames) throws MetaException, NoSuchObjectException { + List> partVals = new ArrayList>(partNames.size()); + for (String partName : partNames) { + partVals.add(partNameToVals(partName)); + } + boolean commit = false; + openTransaction(); + try { + List cs = + getHBase().getPartitionStatistics(dbName, tblName, partNames, partVals, colNames); + commit = true; + return cs; + } catch (IOException e) { + LOG.error("Unable to fetch column statistics", e); + throw new MetaException("Failed fetching column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + List partVals, String colName) throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + // NOP, stats will be deleted along with the partition when it is dropped. + return true; + } + + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws + NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + // NOP, stats will be deleted along with the table when it is dropped. + return true; + } + + /** + * Return aggregated statistics for each column in the colNames list aggregated over partitions in + * the partNames list + * + */ + @Override + public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, + List colNames) throws MetaException, NoSuchObjectException { + List> partVals = new ArrayList>(partNames.size()); + for (String partName : partNames) { + partVals.add(partNameToVals(partName)); + } + boolean commit = false; + boolean hasAnyStats = false; + openTransaction(); + try { + AggrStats aggrStats = new AggrStats(); + aggrStats.setPartsFound(0); + for (String colName : colNames) { + try { + AggrStats oneCol = + getHBase().getStatsCache().get(dbName, tblName, partNames, colName); + if (oneCol.getColStatsSize() > 0) { + assert oneCol.getColStatsSize() == 1; + aggrStats.setPartsFound(oneCol.getPartsFound()); + aggrStats.addToColStats(oneCol.getColStats().get(0)); + hasAnyStats = true; + } + } catch (CacheLoader.InvalidCacheLoadException e) { + LOG.debug("Found no stats for column " + colName); + // This means we have no stats at all for this column for these partitions, so just + // move on. 
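That catch leans on a documented Guava contract: when a CacheLoader returns null for a key, LoadingCache.get()/getUnchecked() throw CacheLoader.InvalidCacheLoadException rather than handing back null, so here the exception simply signals "no aggregate stats for this column". A minimal, self-contained sketch of that contract, assuming only Guava on the classpath (the cache and key names below are illustrative, not the patch's stats cache):

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class NullLoaderContractDemo {
      public static void main(String[] args) {
        // A loader that finds nothing for the requested key and returns null.
        LoadingCache<String, String> cache = CacheBuilder.newBuilder().build(
            new CacheLoader<String, String>() {
              @Override
              public String load(String colName) {
                return null; // stands in for "no stats stored for this column"
              }
            });
        try {
          cache.getUnchecked("someColumn");
        } catch (CacheLoader.InvalidCacheLoadException e) {
          // Guava refuses to cache null, so the miss surfaces as this exception;
          // the caller treats it as "nothing found" and moves on.
          System.out.println("no entry for this key: " + e.getMessage());
        }
      }
    }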
+ } + } + commit = true; + if (!hasAnyStats) { + // Set the required field. + aggrStats.setColStats(new ArrayList()); + } + return aggrStats; + } catch (IOException e) { + LOG.error("Unable to fetch aggregate column statistics", e); + throw new MetaException("Failed fetching aggregate column statistics, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public long cleanupEvents() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addToken(String tokenIdentifier, String delegationToken) { + boolean commit = false; + openTransaction(); + try { + getHBase().putDelegationToken(tokenIdentifier, delegationToken); + commit = true; + return commit; // See HIVE-11302, for now always returning true + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean removeToken(String tokenIdentifier) { + boolean commit = false; + openTransaction(); + try { + getHBase().deleteDelegationToken(tokenIdentifier); + commit = true; + return commit; // See HIVE-11302, for now always returning true + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public String getToken(String tokenIdentifier) { + boolean commit = false; + openTransaction(); + try { + String token = getHBase().getDelegationToken(tokenIdentifier); + commit = true; + return token; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getAllTokenIdentifiers() { + boolean commit = false; + openTransaction(); + try { + List ids = getHBase().scanDelegationTokenIdentifiers(); + commit = true; + return ids; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public int addMasterKey(String key) throws MetaException { + boolean commit = false; + openTransaction(); + try { + long seq = getHBase().getNextSequence(HBaseReadWrite.MASTER_KEY_SEQUENCE); + getHBase().putMasterKey((int) seq, key); + commit = true; + return (int)seq; + } catch (IOException e) { + LOG.error("Unable to add master key", e); + throw new MetaException("Failed adding master key, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, + MetaException { + boolean commit = false; + openTransaction(); + try { + if (getHBase().getMasterKey(seqNo) == null) { + throw new NoSuchObjectException("No key found with keyId: " + seqNo); + } + getHBase().putMasterKey(seqNo, key); + commit = true; + } catch (IOException e) { + LOG.error("Unable to update master key", e); + throw new MetaException("Failed updating master key, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public boolean removeMasterKey(Integer keySeq) { + boolean commit = false; + openTransaction(); + try { + getHBase().deleteMasterKey(keySeq); + commit = true; + return true; // See HIVE-11302, for now always returning true + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public String[] getMasterKeys() { + boolean commit = false; + openTransaction(); + try { + List keys = getHBase().scanMasterKeys(); + commit = true; + return keys.toArray(new String[keys.size()]); + } catch (IOException e) { + throw new RuntimeException(e); + } 
finally { + commitOrRoleBack(commit); + } + } + + @Override + public void verifySchema() throws MetaException { + + } + + @Override + public String getMetaStoreSchemaVersion() throws MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropPartitions(String dbName, String tblName, List partNames) throws + MetaException, NoSuchObjectException { + boolean commit = false; + openTransaction(); + try { + for (String partName : partNames) { + dropPartition(dbName, tblName, partNameToVals(partName)); + } + commit = true; + } catch (Exception e) { + LOG.error("Unable to drop partitions", e); + throw new NoSuchObjectException("Failure dropping partitions, " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPrincipalDBGrantsAll(String principalName, + PrincipalType principalType) { + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List dbs = getHBase().scanDatabases(null); + for (Database db : dbs) { + List grants; + + PrincipalPrivilegeSet pps = db.getPrivileges(); + if (pps == null) continue; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + + if (map == null) continue; + grants = map.get(principalName); + if (grants == null || grants.size() == 0) continue; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, + db.getName(), null, null, null), principalName, principalType, pgi)); + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPrincipalTableGrantsAll(String principalName, + PrincipalType principalType) { + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + List
tables = getHBase().scanTables(null, null); + for (Table table : tables) { + List grants; + + PrincipalPrivilegeSet pps = table.getPrivileges(); + if (pps == null) continue; + Map> map; + switch (principalType) { + case USER: + map = pps.getUserPrivileges(); + break; + + case ROLE: + map = pps.getRolePrivileges(); + break; + + default: + throw new RuntimeException("Unknown or unsupported principal type " + + principalType.toString()); + } + + if (map == null) continue; + grants = map.get(principalName); + if (grants == null || grants.size() == 0) continue; + for (PrivilegeGrantInfo pgi : grants) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, + table.getDbName(), table.getTableName(), null, null), principalName, principalType, + pgi)); + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPrincipalPartitionGrantsAll(String principalName, + PrincipalType principalType) { + return new ArrayList(); + } + + @Override + public List listPrincipalTableColumnGrantsAll(String principalName, + PrincipalType principalType) { + return new ArrayList(); + } + + @Override + public List listPrincipalPartitionColumnGrantsAll(String principalName, + PrincipalType principalType) { + return new ArrayList(); + } + + @Override + public List listGlobalGrantsAll() { + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs(); + if (pps != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.USER, pgi)); + } + } + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listDBGrantsAll(String dbName) { + List privileges = new ArrayList(); + boolean commit = false; + openTransaction(); + try { + Database db = getHBase().getDb(dbName); + PrincipalPrivilegeSet pps = db.getPrivileges(); + if (pps != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, + null, null, null), e.getKey(), PrincipalType.USER, pgi)); + } + } + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName, + null, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPartitionColumnGrantsAll(String dbName, String tableName, + String partitionName, + String columnName) { + return new ArrayList(); + } + + @Override + public List listTableGrantsAll(String dbName, String tableName) { + List privileges = new ArrayList(); + boolean commit = false; + 
openTransaction(); + try { + Table table = getHBase().getTable(dbName, tableName); + PrincipalPrivilegeSet pps = table.getPrivileges(); + if (pps != null) { + for (Map.Entry> e : pps.getUserPrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.USER, pgi)); + } + } + for (Map.Entry> e : pps.getRolePrivileges().entrySet()) { + for (PrivilegeGrantInfo pgi : e.getValue()) { + privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi)); + } + } + } + commit = true; + return privileges; + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List listPartitionGrantsAll(String dbName, String tableName, + String partitionName) { + return new ArrayList(); + } + + @Override + public List listTableColumnGrantsAll(String dbName, String tableName, + String columnName) { + return new ArrayList(); + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + getHBase().putFunction(func); + commit = true; + } catch (IOException e) { + LOG.error("Unable to create function", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void alterFunction(String dbName, String funcName, Function newFunction) throws + InvalidObjectException, MetaException { + boolean commit = false; + openTransaction(); + try { + getHBase().putFunction(newFunction); + commit = true; + } catch (IOException e) { + LOG.error("Unable to alter function ", e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void dropFunction(String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException { + boolean commit = false; + openTransaction(); + try { + getHBase().deleteFunction(dbName, funcName); + commit = true; + } catch (IOException e) { + LOG.error("Unable to delete function" + e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public Function getFunction(String dbName, String funcName) throws MetaException { + boolean commit = false; + openTransaction(); + try { + Function func = getHBase().getFunction(dbName, funcName); + commit = true; + return func; + } catch (IOException e) { + LOG.error("Unable to get function" + e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getAllFunctions() throws MetaException { + boolean commit = false; + openTransaction(); + try { + List funcs = getHBase().scanFunctions(null, ".*"); + commit = true; + return funcs; + } catch (IOException e) { + LOG.error("Unable to get functions" + e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public List getFunctions(String dbName, String pattern) throws MetaException { + boolean commit = false; + openTransaction(); + try { + List funcs = 
getHBase().scanFunctions(dbName, likeToRegex(pattern)); + List funcNames = new ArrayList(funcs.size()); + for (Function func : funcs) funcNames.add(func.getFunctionName()); + commit = true; + return funcNames; + } catch (IOException e) { + LOG.error("Unable to get functions" + e); + throw new MetaException("Unable to read from or write to hbase " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { + throw new UnsupportedOperationException(); + } + + @Override + public void addNotificationEvent(NotificationEvent event) { + throw new UnsupportedOperationException(); + } + + @Override + public void cleanNotificationEvents(int olderThan) { + throw new UnsupportedOperationException(); + } + + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() { + throw new UnsupportedOperationException(); + } + + @Override + public void flushCache() { + getHBase().flushCatalogCache(); + } + + @Override + public void setConf(Configuration configuration) { + // initialize expressionProxy. Also re-initialize it if + // setConf is being called with new configuration object (though that + // is not expected to happen, doing it just for safety) + if(expressionProxy == null || conf != configuration) { + expressionProxy = PartFilterExprUtil.createExpressionProxy(configuration); + } + conf = configuration; + } + + @Override + public Configuration getConf() { + return conf; + + } + + private HBaseReadWrite getHBase() { + if (hbase == null) hbase = HBaseReadWrite.getInstance(conf); + return hbase; + } + + // This is for building error messages only. It does not look up anything in the metastore. + private String tableNameForErrorMsg(String dbName, String tableName) { + return dbName + "." + tableName; + } + + // This is for building error messages only. It does not look up anything in the metastore as + // they may just throw another error. + private String partNameForErrorMsg(String dbName, String tableName, List partVals) { + return tableNameForErrorMsg(dbName, tableName) + "." + StringUtils.join(partVals, ':'); + } + + private String buildExternalPartName(Table table, Partition part) { + return buildExternalPartName(table, part.getValues()); + } + + private String buildExternalPartName(String dbName, String tableName, List partVals) + throws MetaException { + return buildExternalPartName(getTable(dbName, tableName), partVals); + } + + private Set findUsersToRemapRolesFor(Role role, String principalName, PrincipalType type) + throws IOException, NoSuchObjectException { + Set usersToRemap; + switch (type) { + case USER: + // In this case it's just the user being added to the role that we need to remap for. + usersToRemap = new HashSet(); + usersToRemap.add(principalName); + break; + + case ROLE: + // In this case we need to remap for all users in the containing role (not the role being + // granted into the containing role). + usersToRemap = getHBase().findAllUsersInRole(role.getRoleName()); + break; + + default: + throw new RuntimeException("Unknown principal type " + type); + + } + return usersToRemap; + } + + /** + * Build a partition name for external use. Necessary since HBase itself doesn't store + * partition names. + * @param table table object + * @param partVals partition values. 
+ * @return + */ + static String buildExternalPartName(Table table, List partVals) { + List partCols = new ArrayList(); + for (FieldSchema pc : table.getPartitionKeys()) partCols.add(pc.getName()); + return FileUtils.makePartName(partCols, partVals); + } + + private static List partNameToVals(String name) { + if (name == null) return null; + List vals = new ArrayList(); + String[] kvp = name.split("/"); + for (String kv : kvp) { + vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1))); + } + return vals; + } + + static List> partNameListToValsList(List partNames) { + List> valLists = new ArrayList>(partNames.size()); + for (String partName : partNames) { + valLists.add(partNameToVals(partName)); + } + return valLists; + } + + private String likeToRegex(String like) { + if (like == null) return null; + // Convert Hive's strange like syntax to Java regex. Per + // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Show + // the supported syntax is that * means Java .* and | means 'or' + // This implementation leaves other regular expression syntax alone, which means people can + // use it, even though it wouldn't work on RDBMS backed metastores. + return like.replace("*", ".*"); + } + + private void commitOrRoleBack(boolean commit) { + if (commit) { + LOG.debug("Committing transaction"); + commitTransaction(); + } else { + LOG.debug("Rolling back transaction"); + rollbackTransaction(); + } + } + + @VisibleForTesting HBaseReadWrite backdoor() { + return getHBase(); + } + + @Override + public ByteBuffer[] getFileMetadata(List fileIds) throws MetaException { + openTransaction(); + boolean commit = true; + try { + return getHBase().getFileMetadata(fileIds); + } catch (IOException e) { + commit = false; + LOG.error("Unable to get file metadata", e); + throw new MetaException("Error reading file metadata " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } + + @Override + public void putFileMetadata(List fileIds, List metadata) throws MetaException { + openTransaction(); + boolean commit = false; + try { + getHBase().storeFileMetadata(fileIds, metadata); + commit = true; + } catch (IOException | InterruptedException e) { + LOG.error("Unable to store file metadata", e); + throw new MetaException("Error storing file metadata " + e.getMessage()); + } finally { + commitOrRoleBack(commit); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java new file mode 100644 index 0000000..1885089 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -0,0 +1,1340 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.ByteStream.Output; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; +import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDeWithEndPrefix; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hive.common.util.BloomFilter; +import org.apache.hive.common.util.HiveStringUtils; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import 
java.util.TreeSet; + +/** + * Utility functions + */ +class HBaseUtils { + + final static Charset ENCODING = StandardCharsets.UTF_8; + final static char KEY_SEPARATOR = '\u0001'; + final static String KEY_SEPARATOR_STR = new String(new char[] {KEY_SEPARATOR}); + + static final private Log LOG = LogFactory.getLog(HBaseUtils.class.getName()); + + /** + * Build a key for an object in hbase + * @param components + * @return + */ + static byte[] buildKey(String... components) { + return buildKey(false, components); + } + + static byte[] buildKeyWithTrailingSeparator(String... components) { + return buildKey(true, components); + } + + private static byte[] buildKey(boolean trailingSeparator, String... components) { + String protoKey = StringUtils.join(components, KEY_SEPARATOR); + if (trailingSeparator) protoKey += KEY_SEPARATOR; + return protoKey.getBytes(ENCODING); + } + + private static HbaseMetastoreProto.Parameters buildParameters(Map params) { + List entries = new ArrayList<>(); + for (Map.Entry e : params.entrySet()) { + entries.add( + HbaseMetastoreProto.ParameterEntry.newBuilder() + .setKey(e.getKey()) + .setValue(e.getValue()) + .build()); + } + return HbaseMetastoreProto.Parameters.newBuilder() + .addAllParameter(entries) + .build(); + } + + private static Map buildParameters(HbaseMetastoreProto.Parameters protoParams) { + Map params = new HashMap<>(); + for (HbaseMetastoreProto.ParameterEntry pe : protoParams.getParameterList()) { + params.put(pe.getKey(), pe.getValue()); + } + return params; + } + + + private static List + buildPrincipalPrivilegeSetEntry(Map> entries) { + List results = new ArrayList<>(); + for (Map.Entry> entry : entries.entrySet()) { + results.add(HbaseMetastoreProto.PrincipalPrivilegeSetEntry.newBuilder() + .setPrincipalName(entry.getKey()) + .addAllPrivileges(buildPrivilegeGrantInfo(entry.getValue())) + .build()); + } + return results; + } + + private static List buildPrivilegeGrantInfo( + List privileges) { + List results = new ArrayList<>(); + for (PrivilegeGrantInfo privilege : privileges) { + HbaseMetastoreProto.PrivilegeGrantInfo.Builder builder = + HbaseMetastoreProto.PrivilegeGrantInfo.newBuilder(); + if (privilege.getPrivilege() != null) builder.setPrivilege(privilege.getPrivilege()); + builder.setCreateTime(privilege.getCreateTime()); + if (privilege.getGrantor() != null) builder.setGrantor(privilege.getGrantor()); + if (privilege.getGrantorType() != null) { + builder.setGrantorType(convertPrincipalTypes(privilege.getGrantorType())); + } + builder.setGrantOption(privilege.isGrantOption()); + results.add(builder.build()); + } + return results; + } + + /** + * Convert Thrift.PrincipalType to HbaseMetastoreProto.principalType + * @param type + * @return + */ + static HbaseMetastoreProto.PrincipalType convertPrincipalTypes(PrincipalType type) { + switch (type) { + case USER: return HbaseMetastoreProto.PrincipalType.USER; + case ROLE: return HbaseMetastoreProto.PrincipalType.ROLE; + default: throw new RuntimeException("Unknown principal type " + type.toString()); + } + } + + /** + * Convert principalType from HbaseMetastoreProto to Thrift.PrincipalType + * @param type + * @return + */ + static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType type) { + switch (type) { + case USER: return PrincipalType.USER; + case ROLE: return PrincipalType.ROLE; + default: throw new RuntimeException("Unknown principal type " + type.toString()); + } + } + + private static Map> convertPrincipalPrivilegeSetEntries( + List entries) { + Map> map = new 
HashMap<>(); + for (HbaseMetastoreProto.PrincipalPrivilegeSetEntry entry : entries) { + map.put(entry.getPrincipalName(), convertPrivilegeGrantInfos(entry.getPrivilegesList())); + } + return map; + } + + private static List convertPrivilegeGrantInfos( + List privileges) { + List results = new ArrayList<>(); + for (HbaseMetastoreProto.PrivilegeGrantInfo proto : privileges) { + PrivilegeGrantInfo pgi = new PrivilegeGrantInfo(); + if (proto.hasPrivilege()) pgi.setPrivilege(proto.getPrivilege()); + pgi.setCreateTime((int)proto.getCreateTime()); + if (proto.hasGrantor()) pgi.setGrantor(proto.getGrantor()); + if (proto.hasGrantorType()) { + pgi.setGrantorType(convertPrincipalTypes(proto.getGrantorType())); + } + if (proto.hasGrantOption()) pgi.setGrantOption(proto.getGrantOption()); + results.add(pgi); + } + return results; + } + + private static HbaseMetastoreProto.PrincipalPrivilegeSet + buildPrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { + HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builder = + HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(); + if (pps.getUserPrivileges() != null) { + builder.addAllUsers(buildPrincipalPrivilegeSetEntry(pps.getUserPrivileges())); + } + if (pps.getRolePrivileges() != null) { + builder.addAllRoles(buildPrincipalPrivilegeSetEntry(pps.getRolePrivileges())); + } + return builder.build(); + } + + private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( + HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { + PrincipalPrivilegeSet pps = null; + if (!proto.getUsersList().isEmpty() || !proto.getRolesList().isEmpty()) { + pps = new PrincipalPrivilegeSet(); + if (!proto.getUsersList().isEmpty()) { + pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList())); + } + if (!proto.getRolesList().isEmpty()) { + pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList())); + } + } + return pps; + } + /** + * Serialize a PrincipalPrivilegeSet + * @param pps + * @return + */ + static byte[] serializePrincipalPrivilegeSet(PrincipalPrivilegeSet pps) { + return buildPrincipalPrivilegeSet(pps).toByteArray(); + } + + /** + * Deserialize a PrincipalPrivilegeSet + * @param serialized + * @return + * @throws InvalidProtocolBufferException + */ + static PrincipalPrivilegeSet deserializePrincipalPrivilegeSet(byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.PrincipalPrivilegeSet proto = + HbaseMetastoreProto.PrincipalPrivilegeSet.parseFrom(serialized); + return buildPrincipalPrivilegeSet(proto); + } + + /** + * Serialize a role + * @param role + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeRole(Role role) { + byte[][] result = new byte[2][]; + result[0] = buildKey(role.getRoleName()); + HbaseMetastoreProto.Role.Builder builder = HbaseMetastoreProto.Role.newBuilder(); + builder.setCreateTime(role.getCreateTime()); + if (role.getOwnerName() != null) builder.setOwnerName(role.getOwnerName()); + result[1] = builder.build().toByteArray(); + return result; + } + + /** + * Deserialize a role. This method should be used when the rolename is already known as it + * doesn't have to re-deserialize it. 
+ * @param roleName name of the role + * @param value value fetched from hbase + * @return A role + * @throws InvalidProtocolBufferException + */ + static Role deserializeRole(String roleName, byte[] value) + throws InvalidProtocolBufferException { + Role role = new Role(); + role.setRoleName(roleName); + HbaseMetastoreProto.Role protoRole = + HbaseMetastoreProto.Role.parseFrom(value); + role.setCreateTime((int)protoRole.getCreateTime()); + if (protoRole.hasOwnerName()) role.setOwnerName(protoRole.getOwnerName()); + return role; + } + + /** + * Deserialize a role. This method should be used when the rolename is not already known (eg + * when doing a scan). + * @param key key from hbase + * @param value value from hbase + * @return a role + * @throws InvalidProtocolBufferException + */ + static Role deserializeRole(byte[] key, byte[] value) + throws InvalidProtocolBufferException { + String roleName = new String(key, ENCODING); + return deserializeRole(roleName, value); + } + + /** + * Serialize a list of role names + * @param roles + * @return + */ + static byte[] serializeRoleList(List roles) { + return HbaseMetastoreProto.RoleList.newBuilder() + .addAllRole(roles) + .build() + .toByteArray(); + } + + static List deserializeRoleList(byte[] value) throws InvalidProtocolBufferException { + HbaseMetastoreProto.RoleList proto = HbaseMetastoreProto.RoleList.parseFrom(value); + return new ArrayList<>(proto.getRoleList()); + } + + /** + * Serialize a database + * @param db + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeDatabase(Database db) { + byte[][] result = new byte[2][]; + result[0] = buildKey(HiveStringUtils.normalizeIdentifier(db.getName())); + HbaseMetastoreProto.Database.Builder builder = HbaseMetastoreProto.Database.newBuilder(); + + if (db.getDescription() != null) builder.setDescription(db.getDescription()); + if (db.getLocationUri() != null) builder.setUri(db.getLocationUri()); + if (db.getParameters() != null) builder.setParameters(buildParameters(db.getParameters())); + if (db.getPrivileges() != null) { + builder.setPrivileges(buildPrincipalPrivilegeSet(db.getPrivileges())); + } + if (db.getOwnerName() != null) builder.setOwnerName(db.getOwnerName()); + if (db.getOwnerType() != null) builder.setOwnerType(convertPrincipalTypes(db.getOwnerType())); + + result[1] = builder.build().toByteArray(); + return result; + } + + /** + * Deserialize a database. This method should be used when the db name is already known as it + * doesn't have to re-deserialize it. + * @param dbName name of the database + * @param value value fetched from hbase + * @return A database + * @throws InvalidProtocolBufferException + */ + static Database deserializeDatabase(String dbName, byte[] value) + throws InvalidProtocolBufferException { + Database db = new Database(); + db.setName(dbName); + HbaseMetastoreProto.Database protoDb = HbaseMetastoreProto.Database.parseFrom(value); + if (protoDb.hasDescription()) db.setDescription(protoDb.getDescription()); + if (protoDb.hasUri()) db.setLocationUri(protoDb.getUri()); + if (protoDb.hasParameters()) db.setParameters(buildParameters(protoDb.getParameters())); + if (protoDb.hasPrivileges()) { + db.setPrivileges(buildPrincipalPrivilegeSet(protoDb.getPrivileges())); + } + if (protoDb.hasOwnerName()) db.setOwnerName(protoDb.getOwnerName()); + if (protoDb.hasOwnerType()) db.setOwnerType(convertPrincipalTypes(protoDb.getOwnerType())); + + return db; + } + + /** + * Deserialize a database.
This method should be used when the db name is not already known (eg + * when doing a scan). + * @param key key from hbase + * @param value value from hbase + * @return a role + * @throws InvalidProtocolBufferException + */ + static Database deserializeDatabase(byte[] key, byte[] value) + throws InvalidProtocolBufferException { + String dbName = new String(key, ENCODING); + return deserializeDatabase(dbName, value); + } + + /** + * Serialize a function + * @param func function to serialize + * @return two byte arrays, first contains the key, the second the value. + */ + static byte[][] serializeFunction(Function func) { + byte[][] result = new byte[2][]; + result[0] = buildKey(func.getDbName(), func.getFunctionName()); + HbaseMetastoreProto.Function.Builder builder = HbaseMetastoreProto.Function.newBuilder(); + if (func.getClassName() != null) builder.setClassName(func.getClassName()); + if (func.getOwnerName() != null) builder.setOwnerName(func.getOwnerName()); + if (func.getOwnerType() != null) { + builder.setOwnerType(convertPrincipalTypes(func.getOwnerType())); + } + builder.setCreateTime(func.getCreateTime()); + if (func.getFunctionType() != null) { + builder.setFunctionType(convertFunctionTypes(func.getFunctionType())); + } + if (func.getResourceUris() != null) { + for (ResourceUri uri : func.getResourceUris()) { + builder.addResourceUris(HbaseMetastoreProto.Function.ResourceUri.newBuilder() + .setResourceType(convertResourceTypes(uri.getResourceType())) + .setUri(uri.getUri())); + } + } + result[1] = builder.build().toByteArray(); + return result; + } + + /** + * Deserialize a function. This method should be used when the function and db name are + * already known. + * @param dbName name of the database the function is in + * @param functionName name of the function + * @param value serialized value of the function + * @return function as an object + * @throws InvalidProtocolBufferException + */ + static Function deserializeFunction(String dbName, String functionName, byte[] value) + throws InvalidProtocolBufferException { + Function func = new Function(); + func.setDbName(dbName); + func.setFunctionName(functionName); + HbaseMetastoreProto.Function protoFunc = HbaseMetastoreProto.Function.parseFrom(value); + if (protoFunc.hasClassName()) func.setClassName(protoFunc.getClassName()); + if (protoFunc.hasOwnerName()) func.setOwnerName(protoFunc.getOwnerName()); + if (protoFunc.hasOwnerType()) { + func.setOwnerType(convertPrincipalTypes(protoFunc.getOwnerType())); + } + func.setCreateTime((int)protoFunc.getCreateTime()); + if (protoFunc.hasFunctionType()) { + func.setFunctionType(convertFunctionTypes(protoFunc.getFunctionType())); + } + for (HbaseMetastoreProto.Function.ResourceUri protoUri : protoFunc.getResourceUrisList()) { + func.addToResourceUris(new ResourceUri(convertResourceTypes(protoUri.getResourceType()), + protoUri.getUri())); + } + return func; + } + + /** + * Deserialize a function. This method should be used when the dbname and function name are + * not already known, such as in a scan. 
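In the scan case the row key itself carries the database and function names, joined with the same KEY_SEPARATOR ('\u0001') that buildKey uses, so deserializeKey only has to split the UTF-8 string back apart. A stand-alone round-trip sketch of that convention (JDK-only; the sample names are made up, not taken from the patch):

    import java.nio.charset.StandardCharsets;

    public class HBaseKeyRoundTripDemo {
      // Mirrors HBaseUtils.buildKey/deserializeKey: components joined with '\u0001'.
      private static final String SEP = "\u0001";

      static byte[] buildKey(String... components) {
        return String.join(SEP, components).getBytes(StandardCharsets.UTF_8);
      }

      static String[] splitKey(byte[] key) {
        return new String(key, StandardCharsets.UTF_8).split(SEP);
      }

      public static void main(String[] args) {
        byte[] rowKey = buildKey("default", "my_udf");   // dbName, functionName
        String[] parts = splitKey(rowKey);
        System.out.println(parts[0] + " / " + parts[1]); // prints: default / my_udf
      }
    }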
+ * @param key key from hbase + * @param value value from hbase + * @return function object + * @throws InvalidProtocolBufferException + */ + static Function deserializeFunction(byte[] key, byte[] value) + throws InvalidProtocolBufferException { + String[] keys = deserializeKey(key); + return deserializeFunction(keys[0], keys[1], value); + } + + private static HbaseMetastoreProto.Function.FunctionType convertFunctionTypes(FunctionType type) { + switch (type) { + case JAVA: return HbaseMetastoreProto.Function.FunctionType.JAVA; + default: throw new RuntimeException("Unknown function type " + type.toString()); + } + } + + private static FunctionType convertFunctionTypes(HbaseMetastoreProto.Function.FunctionType type) { + switch (type) { + case JAVA: return FunctionType.JAVA; + default: throw new RuntimeException("Unknown function type " + type.toString()); + } + } + + private static HbaseMetastoreProto.Function.ResourceUri.ResourceType + convertResourceTypes(ResourceType type) { + switch (type) { + case JAR: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.JAR; + case FILE: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.FILE; + case ARCHIVE: return HbaseMetastoreProto.Function.ResourceUri.ResourceType.ARCHIVE; + default: throw new RuntimeException("Unknown resource type " + type.toString()); + } + } + + private static ResourceType convertResourceTypes( + HbaseMetastoreProto.Function.ResourceUri.ResourceType type) { + switch (type) { + case JAR: return ResourceType.JAR; + case FILE: return ResourceType.FILE; + case ARCHIVE: return ResourceType.ARCHIVE; + default: throw new RuntimeException("Unknown resource type " + type.toString()); + } + } + + private static List + convertFieldSchemaListFromProto(List protoList) { + List schemas = new ArrayList<>(protoList.size()); + for (HbaseMetastoreProto.FieldSchema proto : protoList) { + schemas.add(new FieldSchema(proto.getName(), proto.getType(), + proto.hasComment() ? proto.getComment() : null)); + } + return schemas; + } + + private static List + convertFieldSchemaListToProto(List schemas) { + List protoList = new ArrayList<>(schemas.size()); + for (FieldSchema fs : schemas) { + HbaseMetastoreProto.FieldSchema.Builder builder = + HbaseMetastoreProto.FieldSchema.newBuilder(); + builder + .setName(fs.getName()) + .setType(fs.getType()); + if (fs.getComment() != null) builder.setComment(fs.getComment()); + protoList.add(builder.build()); + } + return protoList; + } + + /** + * Serialize a storage descriptor. + * @param sd storage descriptor to serialize + * @return serialized storage descriptor. 
+ */ + static byte[] serializeStorageDescriptor(StorageDescriptor sd) { + HbaseMetastoreProto.StorageDescriptor.Builder builder = + HbaseMetastoreProto.StorageDescriptor.newBuilder(); + builder.addAllCols(convertFieldSchemaListToProto(sd.getCols())); + if (sd.getInputFormat() != null) { + builder.setInputFormat(sd.getInputFormat()); + } + if (sd.getOutputFormat() != null) { + builder.setOutputFormat(sd.getOutputFormat()); + } + builder.setIsCompressed(sd.isCompressed()); + builder.setNumBuckets(sd.getNumBuckets()); + if (sd.getSerdeInfo() != null) { + HbaseMetastoreProto.StorageDescriptor.SerDeInfo.Builder serdeBuilder = + HbaseMetastoreProto.StorageDescriptor.SerDeInfo.newBuilder(); + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + serdeBuilder.setName(serde.getName()); + } + if (serde.getSerializationLib() != null) { + serdeBuilder.setSerializationLib(serde.getSerializationLib()); + } + if (serde.getParameters() != null) { + serdeBuilder.setParameters(buildParameters(serde.getParameters())); + } + builder.setSerdeInfo(serdeBuilder); + } + if (sd.getBucketCols() != null) { + builder.addAllBucketCols(sd.getBucketCols()); + } + if (sd.getSortCols() != null) { + List orders = sd.getSortCols(); + List protoList = new ArrayList<>(orders.size()); + for (Order order : orders) { + protoList.add(HbaseMetastoreProto.StorageDescriptor.Order.newBuilder() + .setColumnName(order.getCol()) + .setOrder(order.getOrder()) + .build()); + } + builder.addAllSortCols(protoList); + } + if (sd.getSkewedInfo() != null) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.Builder skewBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.newBuilder(); + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + skewBuilder.addAllSkewedColNames(skewed.getSkewedColNames()); + } + if (skewed.getSkewedColValues() != null) { + for (List innerList : skewed.getSkewedColValues()) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.Builder listBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList.newBuilder(); + listBuilder.addAllSkewedColValue(innerList); + skewBuilder.addSkewedColValues(listBuilder); + } + } + if (skewed.getSkewedColValueLocationMaps() != null) { + for (Map.Entry, String> e : skewed.getSkewedColValueLocationMaps().entrySet()) { + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.Builder mapBuilder = + HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap.newBuilder(); + mapBuilder.addAllKey(e.getKey()); + mapBuilder.setValue(e.getValue()); + skewBuilder.addSkewedColValueLocationMaps(mapBuilder); + } + } + builder.setSkewedInfo(skewBuilder); + } + builder.setStoredAsSubDirectories(sd.isStoredAsSubDirectories()); + + return builder.build().toByteArray(); + } + + /** + * Produce a hash for the storage descriptor + * @param sd storage descriptor to hash + * @param md message descriptor to use to generate the hash + * @return the hash as a byte array + */ + static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { + // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different + // results for hashes based on the OS or JVM being used. 
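To make the ordering concern concrete: the sketch below (JDK-only, not the patch's code) hashes a parameter map by walking a sorted copy of it, so two maps with the same entries but different iteration orders yield the same digest. MD5 is used purely for illustration, since hashStorageDescriptor digests with whatever MessageDigest the caller passes in.

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class StableMapHashDemo {
      // Digest map entries in sorted key order, mirroring the TreeMap/TreeSet
      // copies made inside hashStorageDescriptor.
      static byte[] hash(Map<String, String> params) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        for (Map.Entry<String, String> e : new TreeMap<>(params).entrySet()) {
          md.update(e.getKey().getBytes(StandardCharsets.UTF_8));
          md.update(e.getValue().getBytes(StandardCharsets.UTF_8));
        }
        return md.digest();
      }

      public static void main(String[] args) throws Exception {
        Map<String, String> a = new HashMap<>();
        a.put("serialization.format", "1");
        a.put("field.delim", ",");
        Map<String, String> b = new LinkedHashMap<>();
        b.put("field.delim", ",");
        b.put("serialization.format", "1");
        // Same entries, different insertion/iteration order, identical digest.
        System.out.println(Arrays.equals(hash(a), hash(b))); // true
      }
    }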
+ md.reset(); + for (FieldSchema fs : sd.getCols()) { + md.update(fs.getName().getBytes(ENCODING)); + md.update(fs.getType().getBytes(ENCODING)); + if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); + } + if (sd.getInputFormat() != null) { + md.update(sd.getInputFormat().getBytes(ENCODING)); + } + if (sd.getOutputFormat() != null) { + md.update(sd.getOutputFormat().getBytes(ENCODING)); + } + md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); + if (sd.getSerdeInfo() != null) { + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + md.update(serde.getName().getBytes(ENCODING)); + } + if (serde.getSerializationLib() != null) { + md.update(serde.getSerializationLib().getBytes(ENCODING)); + } + if (serde.getParameters() != null) { + SortedMap params = new TreeMap<>(serde.getParameters()); + for (Map.Entry param : params.entrySet()) { + md.update(param.getKey().getBytes(ENCODING)); + md.update(param.getValue().getBytes(ENCODING)); + } + } + } + if (sd.getBucketCols() != null) { + SortedSet bucketCols = new TreeSet<>(sd.getBucketCols()); + for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); + } + if (sd.getSortCols() != null) { + SortedSet orders = new TreeSet<>(sd.getSortCols()); + for (Order order : orders) { + md.update(order.getCol().getBytes(ENCODING)); + md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); + } + } + if (sd.getSkewedInfo() != null) { + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); + for (String colname : colnames) md.update(colname.getBytes(ENCODING)); + } + if (skewed.getSkewedColValues() != null) { + SortedSet sortedOuterList = new TreeSet<>(); + for (List innerList : skewed.getSkewedColValues()) { + SortedSet sortedInnerList = new TreeSet<>(innerList); + sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); + } + for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); + } + if (skewed.getSkewedColValueLocationMaps() != null) { + SortedMap sortedMap = new TreeMap<>(); + for (Map.Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { + SortedSet sortedKey = new TreeSet<>(smap.getKey()); + sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); + } + for (Map.Entry e : sortedMap.entrySet()) { + md.update(e.getKey().getBytes(ENCODING)); + md.update(e.getValue().getBytes(ENCODING)); + } + } + } + + return md.digest(); + } + + static StorageDescriptor deserializeStorageDescriptor(byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.StorageDescriptor proto = + HbaseMetastoreProto.StorageDescriptor.parseFrom(serialized); + StorageDescriptor sd = new StorageDescriptor(); + sd.setCols(convertFieldSchemaListFromProto(proto.getColsList())); + if (proto.hasInputFormat()) sd.setInputFormat(proto.getInputFormat()); + if (proto.hasOutputFormat()) sd.setOutputFormat(proto.getOutputFormat()); + sd.setCompressed(proto.getIsCompressed()); + sd.setNumBuckets(proto.getNumBuckets()); + if (proto.hasSerdeInfo()) { + SerDeInfo serde = new SerDeInfo(); + serde.setName(proto.getSerdeInfo().hasName()? + proto.getSerdeInfo().getName():null); + serde.setSerializationLib(proto.getSerdeInfo().hasSerializationLib()? 
+ proto.getSerdeInfo().getSerializationLib():null); + serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters())); + sd.setSerdeInfo(serde); + } + sd.setBucketCols(new ArrayList<>(proto.getBucketColsList())); + List sortCols = new ArrayList<>(); + for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) { + sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder())); + } + sd.setSortCols(sortCols); + if (proto.hasSkewedInfo()) { + SkewedInfo skewed = new SkewedInfo(); + skewed + .setSkewedColNames(new ArrayList<>(proto.getSkewedInfo().getSkewedColNamesList())); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueList innerList : + proto.getSkewedInfo().getSkewedColValuesList()) { + skewed.addToSkewedColValues(new ArrayList<>(innerList.getSkewedColValueList())); + } + Map, String> colMaps = new HashMap<>(); + for (HbaseMetastoreProto.StorageDescriptor.SkewedInfo.SkewedColValueLocationMap map : + proto.getSkewedInfo().getSkewedColValueLocationMapsList()) { + colMaps.put(new ArrayList<>(map.getKeyList()), map.getValue()); + } + skewed.setSkewedColValueLocationMaps(colMaps); + sd.setSkewedInfo(skewed); + } + if (proto.hasStoredAsSubDirectories()) { + sd.setStoredAsSubDirectories(proto.getStoredAsSubDirectories()); + } + return sd; + } + + static List getPartitionKeyTypes(List parts) { + com.google.common.base.Function fieldSchemaToType = + new com.google.common.base.Function() { + public String apply(FieldSchema fs) { return fs.getType(); } + }; + return Lists.transform(parts, fieldSchemaToType); + } + + static List getPartitionNames(List parts) { + com.google.common.base.Function fieldSchemaToName = + new com.google.common.base.Function() { + public String apply(FieldSchema fs) { return fs.getName(); } + }; + return Lists.transform(parts, fieldSchemaToName); + } + + /** + * Serialize a partition + * @param part partition object + * @param sdHash hash that is being used as a key for the enclosed storage descriptor + * @return First element is the key, second is the serialized partition + */ + static byte[][] serializePartition(Partition part, List partTypes, byte[] sdHash) { + byte[][] result = new byte[2][]; + result[0] = buildPartitionKey(part.getDbName(), part.getTableName(), partTypes, part.getValues()); + HbaseMetastoreProto.Partition.Builder builder = HbaseMetastoreProto.Partition.newBuilder(); + builder + .setCreateTime(part.getCreateTime()) + .setLastAccessTime(part.getLastAccessTime()); + if (part.getSd().getLocation() != null) builder.setLocation(part.getSd().getLocation()); + if (part.getSd().getParameters() != null) { + builder.setSdParameters(buildParameters(part.getSd().getParameters())); + } + builder.setSdHash(ByteString.copyFrom(sdHash)); + if (part.getParameters() != null) builder.setParameters(buildParameters(part.getParameters())); + result[1] = builder.build().toByteArray(); + return result; + } + + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals) { + return buildPartitionKey(dbName, tableName, partTypes, partVals, false); + } + + static byte[] buildPartitionKey(String dbName, String tableName, List partTypes, List partVals, boolean endPrefix) { + Object[] components = new Object[partVals.size()]; + for (int i=0;i partTypes, Object[] components, boolean endPrefix) { + ObjectInspector javaStringOI = + PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING); + Object[] data = new Object[components.length+2]; + List 
fois = new ArrayList(components.length+2); + boolean[] endPrefixes = new boolean[components.length+2]; + + data[0] = dbName; + fois.add(javaStringOI); + endPrefixes[0] = false; + data[1] = tableName; + fois.add(javaStringOI); + endPrefixes[1] = false; + + for (int i = 0; i < components.length; i++) { + data[i+2] = components[i]; + TypeInfo expectedType = + TypeInfoUtils.getTypeInfoFromTypeString(partTypes.get(i)); + ObjectInspector outputOI = + TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType); + fois.add(outputOI); + } + Output output = new Output(); + try { + BinarySortableSerDeWithEndPrefix.serializeStruct(output, data, fois, endPrefix); + } catch (SerDeException e) { + throw new RuntimeException("Cannot serialize partition " + StringUtils.join(components, ",")); + } + return Arrays.copyOf(output.getData(), output.getLength()); + } + + static class StorageDescriptorParts { + byte[] sdHash; + String location; + Map parameters; + Partition containingPartition; + Table containingTable; + } + + static void assembleStorageDescriptor(StorageDescriptor sd, StorageDescriptorParts parts) { + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setLocation(parts.location); + ssd.setParameters(parts.parameters); + ssd.setShared(sd); + if (parts.containingPartition != null) { + parts.containingPartition.setSd(ssd); + } else if (parts.containingTable != null) { + parts.containingTable.setSd(ssd); + } else { + throw new RuntimeException("Need either a partition or a table"); + } + } + + /** + * Deserialize a partition. This version should be used when the partition key is not already + * known (eg a scan). + * @param key the key fetched from HBase + * @param serialized the value fetched from HBase + * @return A struct that contains the partition plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializePartition(String dbName, String tableName, List partitions, + byte[] key, byte[] serialized, Configuration conf) throws InvalidProtocolBufferException { + List keys = deserializePartitionKey(partitions, key, conf); + return deserializePartition(dbName, tableName, keys, serialized); + } + + /** + * Deserialize a partition. This version should be used when the partition key is + * known (eg a get). 
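Callers of this variant typically derive partVals from a Hive partition name such as "ds=2015-01-01/hr=12"; the conversion mirrors partNameToVals in HBaseStore earlier in this patch. A rough, JDK-only sketch (the real helper also unescapes each value with FileUtils.unescapePathName, which is omitted here to keep the sketch self-contained):

    import java.util.ArrayList;
    import java.util.List;

    public class PartNameToValsDemo {
      // "ds=2015-01-01/hr=12" -> ["2015-01-01", "12"]
      static List<String> partNameToVals(String name) {
        List<String> vals = new ArrayList<>();
        for (String kv : name.split("/")) {
          vals.add(kv.substring(kv.indexOf('=') + 1));
        }
        return vals;
      }

      public static void main(String[] args) {
        System.out.println(partNameToVals("ds=2015-01-01/hr=12")); // [2015-01-01, 12]
      }
    }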
+ * @param dbName database name + * @param tableName table name + * @param partVals partition values + * @param serialized the value fetched from HBase + * @return A struct that contains the partition plus parts of the storage descriptor + */ + static StorageDescriptorParts deserializePartition(String dbName, String tableName, + List partVals, byte[] serialized) + throws InvalidProtocolBufferException { + HbaseMetastoreProto.Partition proto = HbaseMetastoreProto.Partition.parseFrom(serialized); + Partition part = new Partition(); + StorageDescriptorParts sdParts = new StorageDescriptorParts(); + sdParts.containingPartition = part; + part.setDbName(dbName); + part.setTableName(tableName); + part.setValues(partVals); + part.setCreateTime((int)proto.getCreateTime()); + part.setLastAccessTime((int)proto.getLastAccessTime()); + if (proto.hasLocation()) sdParts.location = proto.getLocation(); + if (proto.hasSdParameters()) sdParts.parameters = buildParameters(proto.getSdParameters()); + sdParts.sdHash = proto.getSdHash().toByteArray(); + if (proto.hasParameters()) part.setParameters(buildParameters(proto.getParameters())); + return sdParts; + } + + private static String[] deserializeKey(byte[] key) { + String k = new String(key, ENCODING); + return k.split(KEY_SEPARATOR_STR); + } + + static List deserializePartitionKey(List partitions, byte[] key, + Configuration conf) { + StringBuffer names = new StringBuffer(); + names.append("dbName,tableName,"); + StringBuffer types = new StringBuffer(); + types.append("string,string,"); + for (int i=0;i partitionKeys = new ArrayList(); + for (int i=0;i bits = new ArrayList<>(bitSet.length); + for (int i = 0; i < bitSet.length; i++) bits.add(bitSet[i]); + HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter protoBloom = + HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder() + .setNumBits(bloom.getBitSize()) + .setNumFuncs(bloom.getNumHashFunctions()) + .addAllBits(bits) + .build(); + + HbaseMetastoreProto.AggrStatsBloomFilter proto = + HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder() + .setDbName(ByteString.copyFrom(dbName.getBytes(ENCODING))) + .setTableName(ByteString.copyFrom(tableName.getBytes(ENCODING))) + .setBloomFilter(protoBloom) + .setAggregatedAt(System.currentTimeMillis()) + .build(); + + return proto.toByteArray(); + } + + private static HbaseMetastoreProto.ColumnStats + protoBufStatsForOneColumn(ColumnStatistics partitionColumnStats, ColumnStatisticsObj colStats) + throws IOException { + HbaseMetastoreProto.ColumnStats.Builder builder = HbaseMetastoreProto.ColumnStats.newBuilder(); + if (partitionColumnStats != null) { + builder.setLastAnalyzed(partitionColumnStats.getStatsDesc().getLastAnalyzed()); + } + assert colStats.getColType() != null; + builder.setColumnType(colStats.getColType()); + assert colStats.getColName() != null; + builder.setColumnName(colStats.getColName()); + + ColumnStatisticsData colData = colStats.getStatsData(); + switch (colData.getSetField()) { + case BOOLEAN_STATS: + BooleanColumnStatsData boolData = colData.getBooleanStats(); + builder.setNumNulls(boolData.getNumNulls()); + builder.setBoolStats( + HbaseMetastoreProto.ColumnStats.BooleanStats.newBuilder() + .setNumTrues(boolData.getNumTrues()) + .setNumFalses(boolData.getNumFalses()) + .build()); + break; + + case LONG_STATS: + LongColumnStatsData longData = colData.getLongStats(); + builder.setNumNulls(longData.getNumNulls()); + builder.setNumDistinctValues(longData.getNumDVs()); + builder.setLongStats( + 
HbaseMetastoreProto.ColumnStats.LongStats.newBuilder() + .setLowValue(longData.getLowValue()) + .setHighValue(longData.getHighValue()) + .build()); + break; + + case DOUBLE_STATS: + DoubleColumnStatsData doubleData = colData.getDoubleStats(); + builder.setNumNulls(doubleData.getNumNulls()); + builder.setNumDistinctValues(doubleData.getNumDVs()); + builder.setDoubleStats( + HbaseMetastoreProto.ColumnStats.DoubleStats.newBuilder() + .setLowValue(doubleData.getLowValue()) + .setHighValue(doubleData.getHighValue()) + .build()); + break; + + case STRING_STATS: + StringColumnStatsData stringData = colData.getStringStats(); + builder.setNumNulls(stringData.getNumNulls()); + builder.setNumDistinctValues(stringData.getNumDVs()); + builder.setStringStats( + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + .setMaxColLength(stringData.getMaxColLen()) + .setAvgColLength(stringData.getAvgColLen()) + .build()); + break; + + case BINARY_STATS: + BinaryColumnStatsData binaryData = colData.getBinaryStats(); + builder.setNumNulls(binaryData.getNumNulls()); + builder.setBinaryStats( + HbaseMetastoreProto.ColumnStats.StringStats.newBuilder() + .setMaxColLength(binaryData.getMaxColLen()) + .setAvgColLength(binaryData.getAvgColLen()) + .build()); + break; + + case DECIMAL_STATS: + DecimalColumnStatsData decimalData = colData.getDecimalStats(); + builder.setNumNulls(decimalData.getNumNulls()); + builder.setNumDistinctValues(decimalData.getNumDVs()); + builder.setDecimalStats( + HbaseMetastoreProto.ColumnStats.DecimalStats.newBuilder() + .setLowValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getLowValue().getUnscaled())) + .setScale(decimalData.getLowValue().getScale()) + .build()) + .setHighValue( + HbaseMetastoreProto.ColumnStats.DecimalStats.Decimal.newBuilder() + .setUnscaled(ByteString.copyFrom(decimalData.getHighValue().getUnscaled())) + .setScale(decimalData.getHighValue().getScale()) + .build())) + .build(); + break; + + default: + throw new RuntimeException("Woh, bad. 
Unknown stats type!"); + } + return builder.build(); + } + + static byte[] serializeStatsForOneColumn(ColumnStatistics partitionColumnStats, + ColumnStatisticsObj colStats) throws IOException { + return protoBufStatsForOneColumn(partitionColumnStats, colStats).toByteArray(); + } + + static ColumnStatisticsObj deserializeStatsForOneColumn(ColumnStatistics partitionColumnStats, + byte[] bytes) throws IOException { + HbaseMetastoreProto.ColumnStats proto = HbaseMetastoreProto.ColumnStats.parseFrom(bytes); + return statsForOneColumnFromProtoBuf(partitionColumnStats, proto); + } + + private static ColumnStatisticsObj + statsForOneColumnFromProtoBuf(ColumnStatistics partitionColumnStats, + HbaseMetastoreProto.ColumnStats proto) throws IOException { + ColumnStatisticsObj colStats = new ColumnStatisticsObj(); + long lastAnalyzed = proto.getLastAnalyzed(); + if (partitionColumnStats != null) { + partitionColumnStats.getStatsDesc().setLastAnalyzed( + Math.max(lastAnalyzed, partitionColumnStats.getStatsDesc().getLastAnalyzed())); + } + colStats.setColType(proto.getColumnType()); + colStats.setColName(proto.getColumnName()); + + ColumnStatisticsData colData = new ColumnStatisticsData(); + if (proto.hasBoolStats()) { + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(proto.getBoolStats().getNumTrues()); + boolData.setNumFalses(proto.getBoolStats().getNumFalses()); + boolData.setNumNulls(proto.getNumNulls()); + colData.setBooleanStats(boolData); + } else if (proto.hasLongStats()) { + LongColumnStatsData longData = new LongColumnStatsData(); + if (proto.getLongStats().hasLowValue()) { + longData.setLowValue(proto.getLongStats().getLowValue()); + } + if (proto.getLongStats().hasHighValue()) { + longData.setHighValue(proto.getLongStats().getHighValue()); + } + longData.setNumNulls(proto.getNumNulls()); + longData.setNumDVs(proto.getNumDistinctValues()); + colData.setLongStats(longData); + } else if (proto.hasDoubleStats()) { + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); + if (proto.getDoubleStats().hasLowValue()) { + doubleData.setLowValue(proto.getDoubleStats().getLowValue()); + } + if (proto.getDoubleStats().hasHighValue()) { + doubleData.setHighValue(proto.getDoubleStats().getHighValue()); + } + doubleData.setNumNulls(proto.getNumNulls()); + doubleData.setNumDVs(proto.getNumDistinctValues()); + colData.setDoubleStats(doubleData); + } else if (proto.hasStringStats()) { + StringColumnStatsData stringData = new StringColumnStatsData(); + stringData.setMaxColLen(proto.getStringStats().getMaxColLength()); + stringData.setAvgColLen(proto.getStringStats().getAvgColLength()); + stringData.setNumNulls(proto.getNumNulls()); + stringData.setNumDVs(proto.getNumDistinctValues()); + colData.setStringStats(stringData); + } else if (proto.hasBinaryStats()) { + BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); + binaryData.setMaxColLen(proto.getBinaryStats().getMaxColLength()); + binaryData.setAvgColLen(proto.getBinaryStats().getAvgColLength()); + binaryData.setNumNulls(proto.getNumNulls()); + colData.setBinaryStats(binaryData); + } else if (proto.hasDecimalStats()) { + DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); + if (proto.getDecimalStats().hasHighValue()) { + Decimal hiVal = new Decimal(); + hiVal.setUnscaled(proto.getDecimalStats().getHighValue().getUnscaled().toByteArray()); + hiVal.setScale((short) proto.getDecimalStats().getHighValue().getScale()); + decimalData.setHighValue(hiVal); + } + if 
(proto.getDecimalStats().hasLowValue()) { + Decimal loVal = new Decimal(); + loVal.setUnscaled(proto.getDecimalStats().getLowValue().getUnscaled().toByteArray()); + loVal.setScale((short) proto.getDecimalStats().getLowValue().getScale()); + decimalData.setLowValue(loVal); + } + decimalData.setNumNulls(proto.getNumNulls()); + decimalData.setNumDVs(proto.getNumDistinctValues()); + colData.setDecimalStats(decimalData); + } else { + throw new RuntimeException("Woh, bad. Unknown stats type!"); + } + colStats.setStatsData(colData); + return colStats; + } + + static byte[] serializeAggrStats(AggrStats aggrStats) throws IOException { + List protoColStats = + new ArrayList<>(aggrStats.getColStatsSize()); + for (ColumnStatisticsObj cso : aggrStats.getColStats()) { + protoColStats.add(protoBufStatsForOneColumn(null, cso)); + } + return HbaseMetastoreProto.AggrStats.newBuilder() + .setPartsFound(aggrStats.getPartsFound()) + .addAllColStats(protoColStats) + .build() + .toByteArray(); + } + + static AggrStats deserializeAggrStats(byte[] serialized) throws IOException { + HbaseMetastoreProto.AggrStats protoAggrStats = + HbaseMetastoreProto.AggrStats.parseFrom(serialized); + AggrStats aggrStats = new AggrStats(); + aggrStats.setPartsFound(protoAggrStats.getPartsFound()); + for (HbaseMetastoreProto.ColumnStats protoCS : protoAggrStats.getColStatsList()) { + aggrStats.addToColStats(statsForOneColumnFromProtoBuf(null, protoCS)); + } + return aggrStats; + } + + /** + * Serialize a delegation token + * @param tokenIdentifier + * @param delegationToken + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeDelegationToken(String tokenIdentifier, String delegationToken) { + byte[][] result = new byte[2][]; + result[0] = buildKey(tokenIdentifier); + result[1] = HbaseMetastoreProto.DelegationToken.newBuilder() + .setTokenStr(delegationToken) + .build() + .toByteArray(); + return result; + } + + /** + * Deserialize a delegation token. + * @param value value fetched from hbase + * @return A delegation token. + * @throws InvalidProtocolBufferException + */ + static String deserializeDelegationToken(byte[] value) throws InvalidProtocolBufferException { + HbaseMetastoreProto.DelegationToken protoToken = + HbaseMetastoreProto.DelegationToken.parseFrom(value); + return protoToken.getTokenStr(); + } + + /** + * Serialize a master key + * @param seqNo + * @param key + * @return two byte arrays, first contains the key, the second the serialized value. + */ + static byte[][] serializeMasterKey(Integer seqNo, String key) { + byte[][] result = new byte[2][]; + result[0] = buildKey(seqNo.toString()); + result[1] = HbaseMetastoreProto.MasterKey.newBuilder() + .setMasterKey(key) + .build() + .toByteArray(); + return result; + } + + /** + * Deserialize a master key. + * @param value value fetched from hbase + * @return A master key + * @throws InvalidProtocolBufferException + */ + static String deserializeMasterKey(byte[] value) throws InvalidProtocolBufferException { + HbaseMetastoreProto.MasterKey protoKey = HbaseMetastoreProto.MasterKey.parseFrom(value); + return protoKey.getMasterKey(); + } + + /** + * @param keyStart byte array representing the start prefix + * @return byte array corresponding to the next possible prefix + */ + static byte[] getEndPrefix(byte[] keyStart) { + if (keyStart == null) { + return null; + } + // Since this is a prefix and not full key, the usual hbase technique of + // appending 0 byte does not work. 
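A standalone illustration of the two key-building tricks implemented just below: bumping the last byte of a prefix to get an exclusive scan end key, and big-endian encoding of a long so that unsigned byte comparison matches numeric order for non-negative values. This is a sketch for illustration; it assumes the prefix does not end in 0xFF, which the simple increment does not handle.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class KeyHelperDemo {
  // Exclusive end key for a prefix scan: copy the prefix and increment its last byte,
  // mirroring the approach getEndPrefix takes below.
  static byte[] endOfPrefix(byte[] start) {
    byte[] end = Arrays.copyOf(start, start.length);
    end[end.length - 1]++;
    return end;
  }

  public static void main(String[] args) {
    byte[] prefix = "db1\u0001tbl1\u0001".getBytes(StandardCharsets.UTF_8);  // hypothetical key prefix
    System.out.println(Arrays.toString(endOfPrefix(prefix)));

    // ByteBuffer writes big-endian by default, the same layout the hand-rolled shifts in
    // makeLongKey produce; byte-wise comparison then matches numeric order for non-negative longs.
    long value = 1234567890L;
    byte[] key = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
    System.out.println(Arrays.toString(key));
  }
}
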
Instead of that, increment the last byte. + byte[] keyEnd = Arrays.copyOf(keyStart, keyStart.length); + keyEnd[keyEnd.length - 1]++; + return keyEnd; + } + + static byte[] makeLongKey(long v) { + byte[] b = new byte[8]; + b[0] = (byte)(v >>> 56); + b[1] = (byte)(v >>> 48); + b[2] = (byte)(v >>> 40); + b[3] = (byte)(v >>> 32); + b[4] = (byte)(v >>> 24); + b[5] = (byte)(v >>> 16); + b[6] = (byte)(v >>> 8); + b[7] = (byte)(v >>> 0); + return b; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java new file mode 100644 index 0000000..5bbed5d --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/ObjectCache.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.HashMap; +import java.util.Map; + +/** + * A generic class for caching objects obtained from HBase. Currently a set of + * convenience methods around a {@link java.util.HashMap} with a max size but built + * as a separate class in case we want to switch out the implementation to something more + * efficient. The cache has a max size; when this is exceeded any additional entries are dropped + * on the floor. + * + * This cache is local to a particular thread and thus is not synchronized. It is intended to be + * flushed before a query begins to make sure it doesn't carry old versions of objects between + * queries (that is, an object may have changed between two queries, we want to get the newest + * version). + */ +class ObjectCache { + private Map cache; + private final int maxSize; + private Counter hits; + private Counter misses; + private Counter overflows; + + /** + * + * @param max maximum number of objects to store in the cache. When max is reached, eviction + * policy is MRU. 
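The class comment above leaves room to swap the plain HashMap for something smarter. As one possibility, illustrative only and not part of this patch, a size-bounded LRU map can be built from the JDK alone:

import java.util.LinkedHashMap;
import java.util.Map;

// Minimal sketch of a true LRU bound, instead of dropping new entries once the map is full.
class BoundedLruMap<K, V> extends LinkedHashMap<K, V> {
  private final int maxSize;

  BoundedLruMap(int maxSize) {
    super(16, 0.75f, true);        // access-order: iteration order tracks recency of use
    this.maxSize = maxSize;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > maxSize;       // evict the least recently used entry when over capacity
  }
}
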
+ * @param hits counter to increment when we find an element in the cache + * @param misses counter to increment when we do not find an element in the cache + * @param overflows counter to increment when we do not have room for an element in the cache + */ + ObjectCache(int max, Counter hits, Counter misses, Counter overflows) { + maxSize = max; + cache = new HashMap(); + this.hits = hits; + this.misses = misses; + this.overflows = overflows; + } + + void put(K key, V value) { + if (cache.size() < maxSize) { + cache.put(key, value); + } else { + overflows.incr(); + } + } + + V get(K key) { + V val = cache.get(key); + if (val == null) misses.incr(); + else hits.incr(); + return val; + } + + void remove(K key) { + cache.remove(key); + } + + void flush() { + cache.clear(); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java new file mode 100644 index 0000000..08d060f --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionCache.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.metastore.api.Partition; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A cache for partition objects. This is separate from + * {@link org.apache.hadoop.hive.metastore.hbase.ObjectCache} because we need to access it + * differently (always by table) and because we need to be able to track whether we are caching + * all of the partitions for a table or not. Like ObjectCache it is local to a particular thread + * and thus not synchronized. Also like ObjectCache it is intended to be flushed before each query. + */ +class PartitionCache { + // This is a trie. The key to the first map is (dbname, tablename), since partitions are + // always accessed within the context of the table they belong to. The second map maps + // partition values (not names) to partitions. + private Map, TrieValue> cache; + private final int maxSize; + private int cacheSize; + private Counter misses; + private Counter hits; + private Counter overflows; + + /** + * + * @param max maximum number of objects to store in the cache. When max is reached, eviction + * policy is MRU. 
+ * @param hits counter to increment when we find an element in the cache + * @param misses counter to increment when we do not find an element in the cache + * @param overflows counter to increment when we do not have room for an element in the cache + */ + PartitionCache(int max, Counter hits, Counter misses, Counter overflows) { + maxSize = max; + cache = new HashMap, TrieValue>(); + cacheSize = 0; + this.hits = hits; + this.misses = misses; + this.overflows = overflows; + } + + /** + * Put a single partition into the cache + * @param dbName + * @param tableName + * @param part + */ + void put(String dbName, String tableName, Partition part) { + if (cacheSize < maxSize) { + ObjectPair key = new ObjectPair(dbName, tableName); + TrieValue entry = cache.get(key); + if (entry == null) { + entry = new TrieValue(false); + cache.put(key, entry); + } + entry.map.put(part.getValues(), part); + cacheSize++; + } else { + overflows.incr(); + } + } + + /** + * + * @param dbName + * @param tableName + * @param parts + * @param allForTable if true indicates that all partitions for this table are present + */ + void put(String dbName, String tableName, List parts, boolean allForTable) { + if (cacheSize + parts.size() < maxSize) { + ObjectPair key = new ObjectPair(dbName, tableName); + TrieValue entry = cache.get(key); + if (entry == null) { + entry = new TrieValue(allForTable); + cache.put(key, entry); + } + for (Partition part : parts) entry.map.put(part.getValues(), part); + cacheSize += parts.size(); + } else { + overflows.incr(); + } + } + + /** + * Will only return a value if all partitions for this table are in the cache. Otherwise you + * should call {@link #get} individually + * @param dbName + * @param tableName + * @return + */ + Collection getAllForTable(String dbName, String tableName) { + TrieValue entry = cache.get(new ObjectPair(dbName, tableName)); + if (entry != null && entry.hasAllPartitionsForTable) { + hits.incr(); + return entry.map.values(); + } else { + misses.incr(); + return null; + } + } + + Partition get(String dbName, String tableName, List partVals) { + TrieValue entry = cache.get(new ObjectPair(dbName, tableName)); + if (entry != null) { + hits.incr(); + return entry.map.get(partVals); + } else { + misses.incr(); + return null; + } + } + + void remove(String dbName, String tableName) { + ObjectPair key = new ObjectPair(dbName, tableName); + TrieValue entry = cache.get(key); + if (entry != null) { + cacheSize -= entry.map.size(); + cache.remove(key); + } + } + + void remove(String dbName, String tableName, List partVals) { + ObjectPair key = new ObjectPair(dbName, tableName); + TrieValue entry = cache.get(key); + if (entry != null && entry.map.remove(partVals) != null) { + cacheSize--; + entry.hasAllPartitionsForTable = false; + } + } + + void flush() { + cache.clear(); + cacheSize = 0; + } + + static class TrieValue { + boolean hasAllPartitionsForTable; + Map, Partition> map; + + TrieValue(boolean hasAll) { + hasAllPartitionsForTable = hasAll; + map = new HashMap, Partition>(); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java new file mode 100644 index 0000000..01fe403 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/PartitionKeyComparator.java @@ -0,0 +1,292 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
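A hedged usage sketch for the PartitionCache above; same-package access is assumed, and the Counter construction mirrors how StatsCache builds its counters later in this patch.

// Assumed imports: java.util.*, org.apache.hadoop.hive.metastore.api.Partition.
Counter hits = new Counter("partition cache hits");
Counter misses = new Counter("partition cache misses");
Counter overflows = new Counter("partition cache overflows");
PartitionCache cache = new PartitionCache(10000, hits, misses, overflows);

Partition july = new Partition();
july.setValues(Arrays.asList("2015", "07"));
cache.put("default", "web_logs", Collections.singletonList(july), true);   // true: whole table cached

Collection<Partition> all = cache.getAllForTable("default", "web_logs");   // non-null, counted as a hit
cache.remove("default", "web_logs", Arrays.asList("2015", "07"));
// Removing any partition clears the all-partitions flag, so this now returns null (a miss):
assert cache.getAllForTable("default", "web_logs") == null;
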
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; + +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PartitionKeyComparator.Operator.Type; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.BytesWritable; + +import com.google.protobuf.InvalidProtocolBufferException; + +public class PartitionKeyComparator extends ByteArrayComparable { + private static final Log LOG = LogFactory.getLog(PartitionKeyComparator.class); + static class Mark { + Mark(String value, boolean inclusive) { + this.value = value; + this.inclusive = inclusive; + } + String value; + boolean inclusive; + public String toString() { + return value + (inclusive?"_":""); + } + } + static class Range { + Range(String keyName, Mark start, Mark end) { + this.keyName = keyName; + this.start = start; + this.end = end; + } + String keyName; + Mark start; + Mark end; + public String toString() { + return "" + keyName + ":" + (start!=null?start.toString():"") + (end!=null?end.toString():""); + } + } + // Cache the information derived from ranges for performance, including + // range in native datatype + static class NativeRange { + int pos; + Comparable start; + Comparable end; + } + static class Operator { + public Operator(Type type, String keyName, String val) { + this.type = type; + this.keyName = keyName; + this.val = val; + } + enum Type { + LIKE, NOTEQUALS + }; + Type type; + String keyName; + String val; + } + static class NativeOperator { + int pos; + Comparable val; + } + String names; + String types; + List ranges; + List nativeRanges; + List ops; + List nativeOps; + Properties serdeProps; + public PartitionKeyComparator(String names, String types, List ranges, List ops) { + super(null); + this.names = names; + this.types = types; + this.ranges = ranges; + this.ops = ops; + serdeProps = new Properties(); + serdeProps.setProperty(serdeConstants.LIST_COLUMNS, 
"dbName,tableName," + names); + serdeProps.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string," + types); + + this.nativeRanges = new ArrayList(this.ranges.size()); + for (int i=0;i(this.ops.size()); + for (int i=0;i ranges = new ArrayList(); + for (HbaseMetastoreProto.PartitionKeyComparator.Range range : proto.getRangeList()) { + Mark start = null; + if (range.hasStart()) { + start = new Mark(range.getStart().getValue(), range.getStart().getInclusive()); + } + Mark end = null; + if (range.hasEnd()) { + end = new Mark(range.getEnd().getValue(), range.getEnd().getInclusive()); + } + ranges.add(new Range(range.getKey(), start, end)); + } + List ops = new ArrayList(); + for (HbaseMetastoreProto.PartitionKeyComparator.Operator op : proto.getOpList()) { + ops.add(new Operator(Operator.Type.valueOf(op.getType().name()), op.getKey(), + op.getVal())); + } + return new PartitionKeyComparator(proto.getNames(), proto.getTypes(), ranges, ops); + } + + @Override + public byte[] toByteArray() { + HbaseMetastoreProto.PartitionKeyComparator.Builder builder = + HbaseMetastoreProto.PartitionKeyComparator.newBuilder(); + builder.setNames(names); + builder.setTypes(types); + for (int i=0;i=0 || + !range.start.inclusive && partVal.compareTo(nativeRange.start)>0) { + if (range.end == null || range.end.inclusive && partVal.compareTo(nativeRange.end)<=0 || + !range.end.inclusive && partVal.compareTo(nativeRange.end)<0) { + continue; + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Fail to match range " + range.keyName + "-" + partVal + "[" + nativeRange.start + + "," + nativeRange.end + "]"); + } + return 1; + } + + for (int i=0;i getCols() { + copyCols(); + return super.getCols(); + } + + @Override + public void setCols(List cols) { + colsCopied = true; + super.setCols(cols); + } + + @Override + public void unsetCols() { + colsCopied = true; + super.unsetCols(); + } + + @Override + public Iterator getColsIterator() { + copyCols(); + return super.getColsIterator(); + } + + private void copyCols() { + if (!colsCopied) { + colsCopied = true; + if (super.getCols() != null) { + List cols = new ArrayList(super.getColsSize()); + for (FieldSchema fs : super.getCols()) cols.add(new FieldSchema(fs)); + super.setCols(cols); + } + } + } + + @Override + public SerDeInfo getSerdeInfo() { + copySerde(); + return super.getSerdeInfo(); + } + + @Override + public void setSerdeInfo(SerDeInfo serdeInfo) { + serdeCopied = true; + super.setSerdeInfo(serdeInfo); + } + + @Override + public void unsetSerdeInfo() { + serdeCopied = true; + super.unsetSerdeInfo(); + } + + private void copySerde() { + if (!serdeCopied) { + serdeCopied = true; + if (super.getSerdeInfo() != null) super.setSerdeInfo(new SerDeInfo(super.getSerdeInfo())); + } + } + + @Override + public void addToBucketCols(String bucket) { + copyBucketCols(); + super.addToBucketCols(bucket); + } + + @Override + public List getBucketCols() { + copyBucketCols(); + return super.getBucketCols(); + } + + @Override + public void setBucketCols(List buckets) { + bucketsCopied = true; + super.setBucketCols(buckets); + } + + @Override + public void unsetBucketCols() { + bucketsCopied = true; + super.unsetBucketCols(); + } + + @Override + public Iterator getBucketColsIterator() { + copyBucketCols(); + return super.getBucketColsIterator(); + } + + private void copyBucketCols() { + if (!bucketsCopied) { + bucketsCopied = true; + if (super.getBucketCols() != null) { + List buckets = new ArrayList(super.getBucketColsSize()); + for (String bucket : super.getBucketCols()) 
buckets.add(bucket); + super.setBucketCols(buckets); + } + } + } + + @Override + public void addToSortCols(Order sort) { + copySort(); + super.addToSortCols(sort); + } + + @Override + public List getSortCols() { + copySort(); + return super.getSortCols(); + } + + @Override + public void setSortCols(List sorts) { + sortCopied = true; + super.setSortCols(sorts); + } + + @Override + public void unsetSortCols() { + sortCopied = true; + super.unsetSortCols(); + } + + @Override + public Iterator getSortColsIterator() { + copySort(); + return super.getSortColsIterator(); + } + + private void copySort() { + if (!sortCopied) { + sortCopied = true; + if (super.getSortCols() != null) { + List sortCols = new ArrayList(super.getSortColsSize()); + for (Order sortCol : super.getSortCols()) sortCols.add(new Order(sortCol)); + super.setSortCols(sortCols); + } + } + } + + @Override + public SkewedInfo getSkewedInfo() { + copySkewed(); + return super.getSkewedInfo(); + } + + @Override + public void setSkewedInfo(SkewedInfo skewedInfo) { + skewedCopied = true; + super.setSkewedInfo(skewedInfo); + } + + @Override + public void unsetSkewedInfo() { + skewedCopied = true; + super.unsetSkewedInfo(); + } + + private void copySkewed() { + if (!skewedCopied) { + skewedCopied = true; + if (super.getSkewedInfo() != null) super.setSkewedInfo(new SkewedInfo(super.getSkewedInfo())); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java new file mode 100644 index 0000000..42efe94 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java @@ -0,0 +1,326 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
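The SharedStorageDescriptor overrides above all follow the same copy-on-write idea: keep sharing the backing StorageDescriptor until a nested field is read or mutated, then copy that field exactly once. A minimal generic sketch of the pattern; the names here are illustrative, not from the patch.

import java.util.ArrayList;
import java.util.List;

class CopyOnWriteListHolder {
  private List<String> backing;     // initially a shared, possibly cached list
  private boolean copied = false;

  CopyOnWriteListHolder(List<String> shared) {
    this.backing = shared;
  }

  List<String> get() {
    if (!copied) {                  // copy lazily, exactly once, before handing it out
      backing = new ArrayList<>(backing);
      copied = true;
    }
    return backing;
  }
}
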
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.protobuf.ByteString; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregator; +import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory; + +import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +/** + * A cache for stats. This is only intended for use by + * {@link org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite} and should not be used outside + * that class. + */ +class StatsCache { + + private static final Log LOG = LogFactory.getLog(StatsCache.class.getName()); + private static StatsCache self = null; + + private LoadingCache cache; + private Invalidator invalidator; + private long runInvalidatorEvery; + private long maxTimeInCache; + private boolean invalidatorHasRun; + + @VisibleForTesting Counter misses; + @VisibleForTesting Counter hbaseHits; + @VisibleForTesting Counter totalGets; + + static synchronized StatsCache getInstance(Configuration conf) { + if (self == null) { + self = new StatsCache(conf); + } + return self; + } + + private StatsCache(Configuration conf) { + final StatsCache me = this; + cache = CacheBuilder.newBuilder() + .maximumSize( + HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES)) + .expireAfterWrite(HiveConf.getTimeVar(conf, + HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL, TimeUnit.SECONDS), TimeUnit.SECONDS) + .build(new CacheLoader() { + @Override + public AggrStats load(StatsCacheKey key) throws Exception { + HBaseReadWrite hrw = HBaseReadWrite.getInstance(); + AggrStats aggrStats = hrw.getAggregatedStats(key.hashed); + if (aggrStats == null) { + misses.incr(); + ColumnStatsAggregator aggregator = null; + ColumnStatisticsObj statsObj = null; + aggrStats = new AggrStats(); + LOG.debug("Unable to find aggregated stats for " + key.colName + ", aggregating"); + List css = hrw.getPartitionStatistics(key.dbName, key.tableName, + key.partNames, HBaseStore.partNameListToValsList(key.partNames), + Collections.singletonList(key.colName)); + if (css != null && css.size() > 0) { + aggrStats.setPartsFound(css.size()); + for (ColumnStatistics cs : css) { + for (ColumnStatisticsObj cso : cs.getStatsObj()) { + if (statsObj == null) { + statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(key.colName, + cso.getColType(), cso.getStatsData().getSetField()); + } + if (aggregator == null) { + aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator( + cso.getStatsData().getSetField()); + } + aggregator.aggregate(statsObj, cso); + } + } + aggrStats.addToColStats(statsObj); + me.put(key, aggrStats); + } + } 
else { + hbaseHits.incr(); + } + return aggrStats; + } + }); + misses = new Counter("Stats cache table misses"); + hbaseHits = new Counter("Stats cache table hits"); + totalGets = new Counter("Total get calls to the stats cache"); + + maxTimeInCache = HiveConf.getTimeVar(conf, + HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL, TimeUnit.SECONDS); + // We want runEvery in milliseconds, even though we give the default value in the conf in + // seconds. + runInvalidatorEvery = HiveConf.getTimeVar(conf, + HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY, TimeUnit.MILLISECONDS); + + invalidator = new Invalidator(); + invalidator.setDaemon(true); + invalidator.start(); + } + + /** + * Add an object to the cache. + * @param key Key for this entry + * @param aggrStats stats + * @throws java.io.IOException + */ + void put(StatsCacheKey key, AggrStats aggrStats) throws IOException { + HBaseReadWrite.getInstance().putAggregatedStats(key.hashed, key.dbName, key.tableName, + key.partNames, + key.colName, aggrStats); + cache.put(key, aggrStats); + } + + /** + * Get partition level statistics + * @param dbName name of database table is in + * @param tableName name of table + * @param partNames names of the partitions + * @param colName of column to get stats for + * @return stats object for this column, or null if none cached + * @throws java.io.IOException + */ + + AggrStats get(String dbName, String tableName, List partNames, String colName) + throws IOException { + totalGets.incr(); + StatsCacheKey key = new StatsCacheKey(dbName, tableName, partNames, colName); + try { + return cache.get(key); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + + /** + * Remove all entries that are related to a particular set of partitions. This should be + * called when partitions are deleted or stats are updated. + * @param dbName name of database table is in + * @param tableName name of table + * @param partName name of the partition + * @throws IOException + */ + void invalidate(String dbName, String tableName, String partName) + throws IOException { + invalidator.addToQueue( + HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry.newBuilder() + .setDbName(ByteString.copyFrom(dbName.getBytes(HBaseUtils.ENCODING))) + .setTableName(ByteString.copyFrom(tableName.getBytes(HBaseUtils.ENCODING))) + .setPartName(ByteString.copyFrom(partName.getBytes(HBaseUtils.ENCODING))) + .build()); + } + + void dumpCounters() { + LOG.debug(misses.dump()); + LOG.debug(hbaseHits.dump()); + LOG.debug(totalGets.dump()); + } + + /** + * Completely dump the cache from memory, used to test that we can access stats from HBase itself. + * @throws IOException + */ + @VisibleForTesting void flushMemory() throws IOException { + cache.invalidateAll(); + } + + @VisibleForTesting void resetCounters() { + misses.clear(); + hbaseHits.clear(); + totalGets.clear(); + } + + @VisibleForTesting void setRunInvalidatorEvery(long runEvery) { + runInvalidatorEvery = runEvery; + } + + @VisibleForTesting void setMaxTimeInCache(long maxTime) { + maxTimeInCache = maxTime; + } + + @VisibleForTesting void wakeInvalidator() throws InterruptedException { + invalidatorHasRun = false; + // Wait through 2 cycles so we're sure our entry won't be picked as too new. 
+ Thread.sleep(2 * runInvalidatorEvery); + invalidator.interrupt(); + while (!invalidatorHasRun) { + Thread.sleep(10); + } + } + + static class StatsCacheKey { + final byte[] hashed; + String dbName; + String tableName; + List partNames; + String colName; + private MessageDigest md; + + StatsCacheKey(byte[] key) { + hashed = key; + } + + StatsCacheKey(String dbName, String tableName, List partNames, String colName) { + this.dbName = dbName; + this.tableName = tableName; + this.partNames = partNames; + this.colName = colName; + + try { + md = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + md.update(dbName.getBytes(HBaseUtils.ENCODING)); + md.update(tableName.getBytes(HBaseUtils.ENCODING)); + Collections.sort(this.partNames); + for (String s : partNames) { + md.update(s.getBytes(HBaseUtils.ENCODING)); + } + md.update(colName.getBytes(HBaseUtils.ENCODING)); + hashed = md.digest(); + } + + @Override + public boolean equals(Object other) { + if (other == null || !(other instanceof StatsCacheKey)) return false; + StatsCacheKey that = (StatsCacheKey)other; + return Arrays.equals(hashed, that.hashed); + } + + @Override + public int hashCode() { + return Arrays.hashCode(hashed); + } + } + + private class Invalidator extends Thread { + private List entries = new ArrayList<>(); + private Lock lock = new ReentrantLock(); + + void addToQueue(HbaseMetastoreProto.AggrStatsInvalidatorFilter.Entry entry) { + lock.lock(); + try { + entries.add(entry); + } finally { + lock.unlock(); + } + } + + @Override + public void run() { + while (true) { + long startedAt = System.currentTimeMillis(); + List thisRun = null; + lock.lock(); + try { + if (entries.size() > 0) { + thisRun = entries; + entries = new ArrayList<>(); + } + } finally { + lock.unlock(); + } + + if (thisRun != null) { + try { + HbaseMetastoreProto.AggrStatsInvalidatorFilter filter = + HbaseMetastoreProto.AggrStatsInvalidatorFilter.newBuilder() + .setRunEvery(runInvalidatorEvery) + .setMaxCacheEntryLife(maxTimeInCache) + .addAllToInvalidate(thisRun) + .build(); + List keys = + HBaseReadWrite.getInstance().invalidateAggregatedStats(filter); + cache.invalidateAll(keys); + } catch (IOException e) { + // Not a lot I can do here + LOG.error("Caught error while invalidating entries in the cache", e); + } + } + invalidatorHasRun = true; + + try { + sleep(runInvalidatorEvery - (System.currentTimeMillis() - startedAt)); + } catch (InterruptedException e) { + LOG.warn("Interupted while sleeping", e); + } + } + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java new file mode 100644 index 0000000..f9c6e73 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/TephraHBaseConnection.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
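The StatsCacheKey above reduces (database, table, partition names, column) to a fixed-size MD5 digest so equality and hashing stay cheap no matter how many partitions are named; sorting the partition names first makes the digest independent of their order. A self-contained illustration; UTF-8 is assumed for HBaseUtils.ENCODING, and this copy sorts a local list rather than the caller's.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class StatsKeyDemo {
  static byte[] hash(String db, String table, List<String> partNames, String col) throws Exception {
    MessageDigest md = MessageDigest.getInstance("MD5");
    md.update(db.getBytes(StandardCharsets.UTF_8));
    md.update(table.getBytes(StandardCharsets.UTF_8));
    List<String> sorted = new ArrayList<>(partNames);   // sort a copy; the patch sorts in place
    Collections.sort(sorted);
    for (String p : sorted) {
      md.update(p.getBytes(StandardCharsets.UTF_8));
    }
    md.update(col.getBytes(StandardCharsets.UTF_8));
    return md.digest();
  }

  public static void main(String[] args) throws Exception {
    byte[] a = hash("default", "web_logs", Arrays.asList("ds=2015-07-01", "ds=2015-07-02"), "ip");
    byte[] b = hash("default", "web_logs", Arrays.asList("ds=2015-07-02", "ds=2015-07-01"), "ip");
    System.out.println(Arrays.equals(a, b));   // true: partition name order does not matter
  }
}
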
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import co.cask.tephra.TransactionAware; +import co.cask.tephra.TransactionContext; +import co.cask.tephra.TransactionFailureException; +import co.cask.tephra.TransactionManager; +import co.cask.tephra.TransactionSystemClient; +import co.cask.tephra.distributed.ThreadLocalClientProvider; +import co.cask.tephra.distributed.TransactionServiceClient; +import co.cask.tephra.hbase10.TransactionAwareHTable; +import co.cask.tephra.hbase10.coprocessor.TransactionProcessor; +import co.cask.tephra.inmemory.InMemoryTxSystemClient; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.twill.discovery.InMemoryDiscoveryService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A class that uses Tephra for transaction management. + */ +public class TephraHBaseConnection extends VanillaHBaseConnection { + static final private Log LOG = LogFactory.getLog(TephraHBaseConnection.class.getName()); + + private Map txnTables; + private TransactionContext txn; + private TransactionSystemClient txnClient; + + TephraHBaseConnection() { + super(); + txnTables = new HashMap(); + } + + @Override + public void connect() throws IOException { + super.connect(); + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { + LOG.debug("Using an in memory client transaction system for testing"); + TransactionManager txnMgr = new TransactionManager(conf); + txnMgr.startAndWait(); + txnClient = new InMemoryTxSystemClient(txnMgr); + } else { + // TODO should enable use of ZKDiscoveryService if users want it + LOG.debug("Using real client transaction system for production"); + txnClient = new TransactionServiceClient(conf, + new ThreadLocalClientProvider(conf, new InMemoryDiscoveryService())); + } + for (String tableName : HBaseReadWrite.tableNames) { + txnTables.put(tableName, new TransactionAwareHTable(super.getHBaseTable(tableName, true))); + } + txn = new TransactionContext(txnClient, txnTables.values()); + } + + @Override + public void beginTransaction() throws IOException { + try { + txn.start(); + LOG.debug("Started txn in tephra"); + } catch (TransactionFailureException e) { + throw new IOException(e); + } + } + + @Override + public void commitTransaction() throws IOException { + try { + txn.finish(); + LOG.debug("Finished txn in tephra"); + } catch (TransactionFailureException e) { + throw new IOException(e); + } + } + + @Override + public void rollbackTransaction() throws IOException { + try { + txn.abort(); + LOG.debug("Aborted txn in tephra"); + } catch (TransactionFailureException e) { + throw new IOException(e); + } + } + + @Override + public void flush(HTableInterface htab) throws IOException { + // NO-OP as we want to flush at commit time + } + + @Override + protected HTableDescriptor buildDescriptor(String tableName, List columnFamilies) + throws IOException { + 
HTableDescriptor tableDesc = super.buildDescriptor(tableName, columnFamilies); + tableDesc.addCoprocessor(TransactionProcessor.class.getName()); + return tableDesc; + } + + @Override + public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException { + // Ignore force, it will mess up our previous creation of the tables. + return (TransactionAwareHTable)txnTables.get(tableName); + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java new file mode 100644 index 0000000..25334a3 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/VanillaHBaseConnection.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Result; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A pass through to a simple HBase connection. This has no transactions. 
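TephraHBaseConnection above maps begin/commit/rollback onto a Tephra TransactionContext, while VanillaHBaseConnection below leaves them as no-ops; callers drive both through the same interface. A hedged sketch of that calling pattern, assuming an instance that has already had setConf() and connect() called on it:

import java.io.IOException;

// Same-package (org.apache.hadoop.hive.metastore.hbase) access to HBaseConnection assumed.
public class TransactionSketch {
  static void withTransaction(HBaseConnection conn, Runnable work) throws IOException {
    conn.beginTransaction();                 // Tephra: starts a transaction; vanilla: no-op
    try {
      work.run();                            // reads/writes through conn.getHBaseTable(...)
      conn.commitTransaction();
    } catch (IOException | RuntimeException e) {
      conn.rollbackTransaction();
      throw e;
    }
  }
}
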
+ */ +public class VanillaHBaseConnection implements HBaseConnection { + static final private Log LOG = LogFactory.getLog(VanillaHBaseConnection.class.getName()); + + protected HConnection conn; + protected Map tables; + protected Configuration conf; + + VanillaHBaseConnection() { + tables = new HashMap(); + } + + @Override + public void connect() throws IOException { + if (conf == null) throw new RuntimeException("Must call getConf before connect"); + conn = HConnectionManager.createConnection(conf); + } + + @Override + public void close() throws IOException { + for (HTableInterface htab : tables.values()) htab.close(); + } + + @Override + public void beginTransaction() throws IOException { + + } + + @Override + public void commitTransaction() throws IOException { + + } + + @Override + public void rollbackTransaction() throws IOException { + + } + + @Override + public void flush(HTableInterface htab) throws IOException { + htab.flushCommits(); + } + + @Override + public void createHBaseTable(String tableName, List columnFamilies) + throws IOException { + HBaseAdmin admin = new HBaseAdmin(conn); + LOG.info("Creating HBase table " + tableName); + admin.createTable(buildDescriptor(tableName, columnFamilies)); + admin.close(); + } + + protected HTableDescriptor buildDescriptor(String tableName, List columnFamilies) + throws IOException { + HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName)); + for (byte[] cf : columnFamilies) { + tableDesc.addFamily(new HColumnDescriptor(cf)); + } + return tableDesc; + } + + @Override + public HTableInterface getHBaseTable(String tableName) throws IOException { + return getHBaseTable(tableName, false); + } + + @Override + public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException { + HTableInterface htab = tables.get(tableName); + if (htab == null) { + LOG.debug("Trying to connect to table " + tableName); + try { + htab = conn.getTable(tableName); + // Calling gettable doesn't actually connect to the region server, it's very light + // weight, so call something else so we actually reach out and touch the region server + // and see if the table is there. + if (force) htab.get(new Get("nosuchkey".getBytes(HBaseUtils.ENCODING))); + } catch (IOException e) { + LOG.info("Caught exception when table was missing"); + return null; + } + htab.setAutoFlushTo(false); + tables.put(tableName, htab); + } + return htab; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java new file mode 100644 index 0000000..bbd2c7b --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BinaryColumnStatsAggregator.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; + +public class BinaryColumnStatsAggregator implements ColumnStatsAggregator{ + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + BinaryColumnStatsData aggregateData = aggregateColStats.getStatsData().getBinaryStats(); + BinaryColumnStatsData newData = newColStats.getStatsData().getBinaryStats(); + aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); + aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java new file mode 100644 index 0000000..9047f68 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/BooleanColumnStatsAggregator.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; + +public class BooleanColumnStatsAggregator implements ColumnStatsAggregator { + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + BooleanColumnStatsData aggregateData = aggregateColStats.getStatsData().getBooleanStats(); + BooleanColumnStatsData newData = newColStats.getStatsData().getBooleanStats(); + aggregateData.setNumTrues(aggregateData.getNumTrues() + newData.getNumTrues()); + aggregateData.setNumFalses(aggregateData.getNumFalses() + newData.getNumFalses()); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java new file mode 100644 index 0000000..217b654 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregator.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; + +public interface ColumnStatsAggregator { + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats); +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java new file mode 100644 index 0000000..a8dbc1f --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; + +public class ColumnStatsAggregatorFactory { + + private ColumnStatsAggregatorFactory() { + } + + public static ColumnStatsAggregator getColumnStatsAggregator(_Fields type) { + switch (type) { + case BOOLEAN_STATS: + return new BooleanColumnStatsAggregator(); + case LONG_STATS: + return new LongColumnStatsAggregator(); + case DOUBLE_STATS: + return new DoubleColumnStatsAggregator(); + case STRING_STATS: + return new StringColumnStatsAggregator(); + case BINARY_STATS: + return new BinaryColumnStatsAggregator(); + case DECIMAL_STATS: + return new DecimalColumnStatsAggregator(); + default: + throw new RuntimeException("Woh, bad. Unknown stats type " + type.toString()); + } + } + + public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) { + ColumnStatisticsObj cso = new ColumnStatisticsObj(); + ColumnStatisticsData csd = new ColumnStatisticsData(); + cso.setColName(colName); + cso.setColType(colType); + switch (type) { + case BOOLEAN_STATS: + csd.setBooleanStats(new BooleanColumnStatsData()); + break; + + case LONG_STATS: + csd.setLongStats(new LongColumnStatsData()); + break; + + case DOUBLE_STATS: + csd.setDoubleStats(new DoubleColumnStatsData()); + break; + + case STRING_STATS: + csd.setStringStats(new StringColumnStatsData()); + break; + + case BINARY_STATS: + csd.setBinaryStats(new BinaryColumnStatsData()); + break; + + case DECIMAL_STATS: + csd.setDecimalStats(new DecimalColumnStatsData()); + break; + + default: + throw new RuntimeException("Woh, bad. Unknown stats type!"); + } + + cso.setStatsData(csd); + return cso; + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java new file mode 100644 index 0000000..ec25b31 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DecimalColumnStatsAggregator.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
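A hedged sketch of how the factory above is meant to be driven; it mirrors the aggregation loop inside StatsCache's cache loader earlier in the patch. Here partitionLevelStats is an assumed List of per-partition ColumnStatisticsObj for one column.

// Assumed imports: org.apache.hadoop.hive.metastore.api.ColumnStatisticsData,
// org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj, plus the factory and aggregators above.
ColumnStatisticsObj running = null;
ColumnStatsAggregator aggregator = null;
for (ColumnStatisticsObj perPartition : partitionLevelStats) {
  ColumnStatisticsData._Fields kind = perPartition.getStatsData().getSetField();
  if (running == null) {
    running = ColumnStatsAggregatorFactory.newColumnStaticsObj(     // method name as spelled in the patch
        perPartition.getColName(), perPartition.getColType(), kind);
    aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(kind);
  }
  aggregator.aggregate(running, perPartition);   // folds this partition's stats into the running object
}
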
+ */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; + +public class DecimalColumnStatsAggregator implements ColumnStatsAggregator { + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + DecimalColumnStatsData aggregateData = aggregateColStats.getStatsData().getDecimalStats(); + DecimalColumnStatsData newData = newColStats.getStatsData().getDecimalStats(); + Decimal lowValue = + (aggregateData.getLowValue().compareTo(newData.getLowValue()) > 0) ? aggregateData + .getLowValue() : newData.getLowValue(); + aggregateData.setLowValue(lowValue); + Decimal highValue = + (aggregateData.getHighValue().compareTo(newData.getHighValue()) > 0) ? aggregateData + .getHighValue() : newData.getHighValue(); + aggregateData.setHighValue(highValue); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java new file mode 100644 index 0000000..71af0ac --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/DoubleColumnStatsAggregator.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; + +public class DoubleColumnStatsAggregator implements ColumnStatsAggregator { + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + DoubleColumnStatsData aggregateData = aggregateColStats.getStatsData().getDoubleStats(); + DoubleColumnStatsData newData = newColStats.getStatsData().getDoubleStats(); + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java new file mode 100644 index 0000000..15b8cf7 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/LongColumnStatsAggregator.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; + +public class LongColumnStatsAggregator implements ColumnStatsAggregator { + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + LongColumnStatsData aggregateData = aggregateColStats.getStatsData().getLongStats(); + LongColumnStatsData newData = newColStats.getStatsData().getLongStats(); + aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue())); + aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/StringColumnStatsAggregator.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/StringColumnStatsAggregator.java new file mode 100644 index 0000000..fe1a04c --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/StringColumnStatsAggregator.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.hbase.stats; + +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; + +public class StringColumnStatsAggregator implements ColumnStatsAggregator { + + @Override + public void aggregate(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { + StringColumnStatsData aggregateData = aggregateColStats.getStatsData().getStringStats(); + StringColumnStatsData newData = newColStats.getStatsData().getStringStats(); + aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen())); + aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen())); + aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls()); + aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs())); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index 781ac63..63be7b7 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -26,9 +26,7 @@ import org.antlr.runtime.ANTLRStringStream; import org.antlr.runtime.CharStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -36,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.serde.serdeConstants; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; /** @@ -535,6 +534,12 @@ public TreeNode getRoot() { return this.root; } + @VisibleForTesting + public void setRootForTest(TreeNode tn) { + this.root = tn; + } + + /** * Adds a intermediate node of either type(AND/OR). Pops last two nodes from * the stack and sets them as children of the new node and pushes itself diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto new file mode 100644 index 0000000..0d0ef89 --- /dev/null +++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -0,0 +1,282 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +enum PrincipalType { + USER = 0; + ROLE = 1; +} + +message AggrStats { + required int64 parts_found = 1; + repeated ColumnStats col_stats = 2; +} + +message AggrStatsBloomFilter { + message BloomFilter { + required int32 num_bits = 1; + required int32 num_funcs = 2; + repeated int64 bits = 3; + } + required bytes db_name = 1; + required bytes table_name = 2; + required BloomFilter bloom_filter = 3; + required int64 aggregated_at = 4; +} + +message AggrStatsInvalidatorFilter { + message Entry { + required bytes db_name = 1; + required bytes table_name = 2; + required bytes part_name = 3; + } + + repeated Entry to_invalidate = 1; + required int64 run_every = 2; + required int64 max_cache_entry_life = 3; +} + +message ColumnStats { + + message BooleanStats { + optional int64 num_trues = 1; + optional int64 num_falses = 2; + } + + message LongStats { + optional sint64 low_value = 1; + optional sint64 high_value = 2; + } + + message DoubleStats { + optional double low_value = 1; + optional double high_value = 2; + } + + message StringStats { + optional int64 max_col_length = 1; + optional double avg_col_length = 2; + } + + message DecimalStats { + message Decimal { + required bytes unscaled = 1; + required int32 scale = 2; + } + optional Decimal low_value = 1; + optional Decimal high_value = 2; + } + + optional int64 last_analyzed = 1; + required string column_type = 2; + optional int64 num_nulls = 3; + optional int64 num_distinct_values = 4; + optional BooleanStats bool_stats = 5; + optional LongStats long_stats = 6; + optional DoubleStats double_stats = 7; + optional StringStats string_stats = 8; + optional StringStats binary_stats = 9; + optional DecimalStats decimal_stats = 10; + optional string column_name = 11; +} + +message Database { + optional string description = 1; + optional string uri = 2; + optional Parameters parameters = 3; + optional PrincipalPrivilegeSet privileges = 4; + optional string owner_name = 5; + optional PrincipalType owner_type = 6; +} + +message DelegationToken { + required string token_str = 1; +} + +message FieldSchema { + required string name = 1; + required string type = 2; + optional string comment = 3; +} + +message Function { + enum FunctionType { + JAVA = 1; + } + + message ResourceUri { + enum ResourceType { + JAR = 1; + FILE = 2; + ARCHIVE = 3; + } + required ResourceType resource_type = 1; + required string uri = 2; + } + + optional string class_name = 1; + optional string owner_name = 2; + optional PrincipalType owner_type = 3; + optional sint64 create_time = 4; + optional FunctionType function_type = 5; + repeated ResourceUri resource_uris = 6; +} + +message MasterKey { + required string master_key = 1; +} + +message ParameterEntry { + required string key = 1; + required string value = 2; +} + +message 
Parameters { + repeated ParameterEntry parameter = 1; +} + +message Partition { + optional int64 create_time = 1; + optional int64 last_access_time = 2; + optional string location = 3; + optional Parameters sd_parameters = 4; // storage descriptor parameters + required bytes sd_hash = 5; + optional Parameters parameters = 6; // partition parameters + // We don't support partition level privileges +} + +message PrincipalPrivilegeSetEntry { + required string principal_name = 1; + repeated PrivilegeGrantInfo privileges = 2; +} + +message PrincipalPrivilegeSet { + repeated PrincipalPrivilegeSetEntry users = 1; + repeated PrincipalPrivilegeSetEntry roles = 2; +} + +message PrivilegeGrantInfo { + optional string privilege = 1; + optional int64 create_time = 2; + optional string grantor = 3; + optional PrincipalType grantor_type = 4; + optional bool grant_option = 5; +} + +message RoleGrantInfo { + required string principal_name = 1; + required PrincipalType principal_type = 2; + optional int64 add_time = 3; + optional string grantor = 4; + optional PrincipalType grantor_type = 5; + optional bool grant_option = 6; +} + +message RoleGrantInfoList { + repeated RoleGrantInfo grant_info = 1; +} + +message RoleList { + repeated string role = 1; +} + +message Role { + optional int64 create_time = 1; + optional string owner_name = 2; +} + +message StorageDescriptor { + message Order { + required string column_name = 1; + optional sint32 order = 2 [default = 1]; + } + + message SerDeInfo { + optional string name = 1; + optional string serialization_lib = 2; + optional Parameters parameters = 3; + } + + message SkewedInfo { + message SkewedColValueList { + repeated string skewed_col_value = 1; + } + + message SkewedColValueLocationMap { + repeated string key = 1; + required string value = 2; + } + + repeated string skewed_col_names = 1; + repeated SkewedColValueList skewed_col_values = 2; + repeated SkewedColValueLocationMap skewed_col_value_location_maps = 3; + } + + repeated FieldSchema cols = 1; + optional string input_format = 2; + optional string output_format = 3; + optional bool is_compressed = 4; + optional sint32 num_buckets = 5; + optional SerDeInfo serde_info = 6; + repeated string bucket_cols = 7; + repeated Order sort_cols = 8; + optional SkewedInfo skewed_info = 9; + optional bool stored_as_sub_directories = 10; +} + +message Table { + optional string owner = 1; + optional int64 create_time = 2; + optional int64 last_access_time = 3; + optional int64 retention = 4; + optional string location = 5; + optional Parameters sd_parameters = 6; // storage descriptor parameters + required bytes sd_hash = 7; + repeated FieldSchema partition_keys = 8; + optional Parameters parameters = 9; + optional string view_original_text = 10; + optional string view_expanded_text = 11; + optional string table_type = 12; + optional PrincipalPrivilegeSet privileges = 13; + optional bool is_temporary = 14; +} + +message PartitionKeyComparator { + required string names = 1; + required string types = 2; + message Mark { + required string value = 1; + required bool inclusive = 2; + } + message Range { + required string key = 1; + optional Mark start = 2; + optional Mark end = 3; + } + message Operator { + enum Type { + LIKE = 0; + NOTEQUALS = 1; + } + required Type type = 1; + required string key = 2; + required string val = 3; + } + repeated Operator op = 3; + repeated Range range = 4; +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f184c56..0f3331a 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -18,17 +18,16 @@ package org.apache.hadoop.hive.metastore; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.SortedSet; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; @@ -44,24 +43,16 @@ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -401,44 +392,45 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa } @Override - public List listPrincipalGlobalGrants(String principalName, + public List listPrincipalGlobalGrants(String principalName, PrincipalType principalType) { return objectStore.listPrincipalGlobalGrants(principalName, principalType); } @Override - public List listPrincipalDBGrants(String principalName, + public List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName) { return objectStore.listPrincipalDBGrants(principalName, principalType, dbName); } @Override - public List listAllTableGrants(String principalName, + public List listAllTableGrants(String principalName, PrincipalType principalType, String dbName, String tableName) { return objectStore.listAllTableGrants(principalName, principalType, dbName, tableName); } @Override - public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String partName) { + public List 
listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, List partValues, + String partName) { return objectStore.listPrincipalPartitionGrants(principalName, principalType, - dbName, tableName, partName); + dbName, tableName, partValues, partName); } @Override - public List listPrincipalTableColumnGrants(String principalName, + public List listPrincipalTableColumnGrants(String principalName, PrincipalType principalType, String dbName, String tableName, String columnName) { return objectStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName); } @Override - public List listPrincipalPartitionColumnGrants( + public List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, - String partName, String columnName) { + List partVals, String partName, String columnName) { return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType, - dbName, tableName, partName, columnName); + dbName, tableName, partVals, partName, columnName); } @Override @@ -464,12 +456,18 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public List listRoles(String principalName, PrincipalType principalType) { + public List listRoles(String principalName, PrincipalType principalType) { return objectStore.listRoles(principalName, principalType); } @Override - public List listRoleMembers(String roleName) { + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { + return objectStore.listRolesWithGrants(principalName, principalType); + } + + @Override + public List listRoleMembers(String roleName) { return objectStore.listRoleMembers(roleName); } @@ -758,5 +756,17 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { return objectStore.getCurrentNotificationEventId(); } + @Override + public void flushCache() { + objectStore.flushCache(); + } + @Override + public ByteBuffer[] getFileMetadata(List fileIds) { + return null; + } + + @Override + public void putFileMetadata(List fileIds, List metadata) { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 83fb4bb..126a2c2 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; import java.util.Map; @@ -47,18 +48,12 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.model.MDBPrivilege; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; -import 
org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; -import org.apache.hadoop.hive.metastore.model.MRoleMap; -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; -import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -419,44 +414,45 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa } @Override - public List listPrincipalGlobalGrants(String principalName, + public List listPrincipalGlobalGrants(String principalName, PrincipalType principalType) { return Collections.emptyList(); } @Override - public List listPrincipalDBGrants(String principalName, + public List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName) { return Collections.emptyList(); } @Override - public List listAllTableGrants(String principalName, + public List listAllTableGrants(String principalName, PrincipalType principalType, String dbName, String tableName) { return Collections.emptyList(); } @Override - public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String partName) { + public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, List partValues, + String partName) { return Collections.emptyList(); } @Override - public List listPrincipalTableColumnGrants(String principalName, + public List listPrincipalTableColumnGrants(String principalName, PrincipalType principalType, String dbName, String tableName, String columnName) { return Collections.emptyList(); } @Override - public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String partName, - String columnName) { + public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, List partVals, + String partName, String columnName) { return Collections.emptyList(); } @@ -488,17 +484,23 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public List listRoles(String principalName, PrincipalType principalType) { + public List listRoles(String principalName, PrincipalType principalType) { return Collections.emptyList(); } @Override - public List listRoleMembers(String roleName) { + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { return Collections.emptyList(); } @Override + public List listRoleMembers(String roleName) { + return null; + } + + @Override public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { @@ -772,6 +774,18 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { } + public void flushCache() { + + } + + @Override + public ByteBuffer[] getFileMetadata(List fileIds) { + return null; + } + + @Override + public void putFileMetadata(List fileIds, List metadata) { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index a4f9f6c..78a9ea0 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -24,6 +24,7 @@ import 
org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -69,22 +70,13 @@ public boolean filterPartitionsByExpr(List partColumnNames, } @Before - public void setUp() { + public void setUp() throws Exception { HiveConf conf = new HiveConf(); conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName()); objectStore = new ObjectStore(); objectStore.setConf(conf); - - Deadline.registerIfNot(100000); - try { - objectStore.dropDatabase(DB1); - } catch (Exception e) { - } - try { - objectStore.dropDatabase(DB2); - } catch (Exception e) { - } + dropAllStoreObjects(objectStore); } @After @@ -227,4 +219,33 @@ public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchOb objectStore.revokeRole(role1, USER1, PrincipalType.USER, false); objectStore.removeRole(ROLE1); } + + public static void dropAllStoreObjects(RawStore store) throws MetaException, InvalidObjectException, InvalidInputException { + try { + Deadline.registerIfNot(100000); + List funcs = store.getAllFunctions(); + for (Function func : funcs) { + store.dropFunction(func.getDbName(), func.getFunctionName()); + } + List dbs = store.getAllDatabases(); + for (int i = 0; i < dbs.size(); i++) { + String db = dbs.get(i); + List tbls = store.getAllTables(db); + for (String tbl : tbls) { + Deadline.startTimer("getPartition"); + List parts = store.getPartitions(db, tbl, 100); + for (Partition part : parts) { + store.dropPartition(db, tbl, part.getValues()); + } + store.dropTable(db, tbl); + } + store.dropDatabase(db); + } + List roles = store.listRoleNames(); + for (String role : roles) { + store.removeRole(role); + } + } catch (NoSuchObjectException e) { + } + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java new file mode 100644 index 0000000..6c288f4 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; + +/** + * Mock utilities for HBaseStore testing + */ +public class MockUtils { + + /** + * The default impl is in ql package and is not available in unit tests. + */ + public static class NOOPProxy implements PartitionExpressionProxy { + + @Override + public String convertExprToFilter(byte[] expr) throws MetaException { + return null; + } + + @Override + public boolean filterPartitionsByExpr(List partColumnNames, + List partColumnTypeInfos, byte[] expr, String defaultPartitionName, + List partitionNames) throws MetaException { + return false; + } + + } + + static HBaseStore init(Configuration conf, HTableInterface htable, + final SortedMap rows) throws IOException { + ((HiveConf)conf).setVar(ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, NOOPProxy.class.getName()); + Mockito.when(htable.get(Mockito.any(Get.class))).thenAnswer(new Answer() { + @Override + public Result answer(InvocationOnMock invocation) throws Throwable { + Get get = (Get) invocation.getArguments()[0]; + Cell cell = rows.get(new String(get.getRow())); + if (cell == null) { + return new Result(); + } else { + return Result.create(new Cell[]{cell}); + } + } + }); + + Mockito.when(htable.get(Mockito.anyListOf(Get.class))).thenAnswer(new Answer() { + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + @SuppressWarnings("unchecked") + List gets = (List) invocation.getArguments()[0]; + Result[] results = new Result[gets.size()]; + for (int i = 0; i < gets.size(); i++) { + Cell cell = rows.get(new String(gets.get(i).getRow())); + Result result; + if (cell == null) { + result = new Result(); + } else { + result = Result.create(new Cell[]{cell}); + } + results[i] = result; + } + return results; + } + }); + + Mockito.when(htable.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer() { + @Override + public ResultScanner answer(InvocationOnMock invocation) throws Throwable { + Scan scan = (Scan)invocation.getArguments()[0]; + List results = new ArrayList(); + String start = new String(scan.getStartRow()); + String stop = new String(scan.getStopRow()); + SortedMap sub = rows.subMap(start, stop); + for (Map.Entry e : sub.entrySet()) { + results.add(Result.create(new Cell[]{e.getValue()})); + } + + final Iterator iter = results.iterator(); + + return new ResultScanner() { + @Override + public Result next() throws IOException { + return null; + } + + @Override + public Result[] next(int nbRows) throws IOException { + return new Result[0]; + } + + @Override + public void 
close() { + + } + + @Override + public Iterator iterator() { + return iter; + } + }; + } + }); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Put put = (Put)invocation.getArguments()[0]; + rows.put(new String(put.getRow()), put.getFamilyCellMap().firstEntry().getValue().get(0)); + return null; + } + }).when(htable).put(Mockito.any(Put.class)); + + Mockito.when(htable.checkAndPut(Mockito.any(byte[].class), Mockito.any(byte[].class), + Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Put.class))).thenAnswer( + new Answer() { + + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + // Always say it succeeded and overwrite + Put put = (Put)invocation.getArguments()[4]; + rows.put(new String(put.getRow()), + put.getFamilyCellMap().firstEntry().getValue().get(0)); + return true; + } + }); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Delete del = (Delete)invocation.getArguments()[0]; + rows.remove(new String(del.getRow())); + return null; + } + }).when(htable).delete(Mockito.any(Delete.class)); + + Mockito.when(htable.checkAndDelete(Mockito.any(byte[].class), Mockito.any(byte[].class), + Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Delete.class))).thenAnswer( + new Answer() { + + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + // Always say it succeeded + Delete del = (Delete)invocation.getArguments()[4]; + rows.remove(new String(del.getRow())); + return true; + } + }); + + // Mock connection + HBaseConnection hconn = Mockito.mock(HBaseConnection.class); + Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable); + HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN); + HBaseReadWrite.setTestConnection(hconn); + HBaseReadWrite hbase = HBaseReadWrite.getInstance(conf); + HBaseStore store = new HBaseStore(); + store.setConf(conf); + return store; + } +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java new file mode 100644 index 0000000..af8f5fc --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java @@ -0,0 +1,316 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + +public class TestHBaseAggregateStatsCache { + private static final Log LOG = LogFactory.getLog(TestHBaseAggregateStatsCache.class.getName()); + + @Mock HTableInterface htable; + private HBaseStore store; + SortedMap rows = new TreeMap<>(); + + @Before + public void before() throws IOException { + MockitoAnnotations.initMocks(this); + HiveConf conf = new HiveConf(); + conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + store = MockUtils.init(conf, htable, rows); + store.backdoor().getStatsCache().resetCounters(); + } + + private static interface Checker { + void checkStats(AggrStats aggrStats) throws Exception; + } + + // Due to limitations in the Mock infrastructure we use for HBase testing, we can only test + // this for a single-column table, and we can't really test hits in HBase, only in-memory hits or + // builds from scratch. But it's still useful to cover many bugs. More in-depth testing with + // multiple columns and with HBase hits is done in TestHBaseAggrStatsCacheIntegration.
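As a point of reference for the expected numbers below, here is a minimal, self-contained sketch of the merge these tests check for, built on the ColumnStatsAggregatorFactory and BooleanColumnStatsAggregator introduced earlier in this patch. The sketch class, its main method, and the setBooleanStats helper are illustrative only and not part of the patch; the factory, aggregator, and Thrift stats classes are the ones added above, and the 10/20/30 per-partition counts mirror what allWithStats writes, so summing them gives the 20/40/60 the checker asserts.

import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData._Fields;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory;

public class BooleanStatsMergeSketch {
  public static void main(String[] args) {
    // Running aggregate, seeded with the first partition's stats (10 falses, 20 trues, 30 nulls).
    ColumnStatisticsObj aggregate =
        ColumnStatsAggregatorFactory.newColumnStaticsObj("col1", "boolean", _Fields.BOOLEAN_STATS);
    setBooleanStats(aggregate, 10, 20, 30);

    // Stats for the second partition; in allWithStats both partitions carry identical numbers.
    ColumnStatisticsObj partition2 =
        ColumnStatsAggregatorFactory.newColumnStaticsObj("col1", "boolean", _Fields.BOOLEAN_STATS);
    setBooleanStats(partition2, 10, 20, 30);

    // The factory hands back a BooleanColumnStatsAggregator, which folds the new partition's
    // counters into the running aggregate by simple addition, giving 20 falses, 40 trues and
    // 60 nulls, which is what the allWithStats checker below asserts on.
    ColumnStatsAggregator agg =
        ColumnStatsAggregatorFactory.getColumnStatsAggregator(_Fields.BOOLEAN_STATS);
    agg.aggregate(aggregate, partition2);
    System.out.println(aggregate.getStatsData().getBooleanStats());
  }

  // Illustrative helper (not part of the patch): fills in the boolean counters on a stats
  // object created by the factory.
  private static void setBooleanStats(ColumnStatisticsObj obj, long falses, long trues, long nulls) {
    BooleanColumnStatsData d = obj.getStatsData().getBooleanStats();
    d.setNumFalses(falses);
    d.setNumTrues(trues);
    d.setNumNulls(nulls);
  }
}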
+ + @Test + public void allWithStats() throws Exception { + String dbName = "default"; + String tableName = "hit"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + for (List partVals : Arrays.asList(partVals1, partVals2)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/hit/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData bcsd = new BooleanColumnStatsData(); + bcsd.setNumFalses(10); + bcsd.setNumTrues(20); + bcsd.setNumNulls(30); + data.setBooleanStats(bcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(2, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("boolean", cso.getColType()); + BooleanColumnStatsData bcsd = cso.getStatsData().getBooleanStats(); + Assert.assertEquals(20, bcsd.getNumFalses()); + Assert.assertEquals(40, bcsd.getNumTrues()); + Assert.assertEquals(60, bcsd.getNumNulls()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. 
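// (Concretely, the assertions below expect totalGets to advance from 1 to 2 while misses stays at 1
// and hbaseHits stays at 0, i.e. the second request is answered from the in-memory aggregate cache
// rather than rebuilt from partition stats or read back from HBase.)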
+ aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + } + + + @Test + public void noneWithStats() throws Exception { + String dbName = "default"; + String tableName = "nws"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + for (List partVals : Arrays.asList(partVals1, partVals2)) { + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/nws/ds=" + partVals.get(0)); + Partition part = new Partition(partVals, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + } + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(0, aggrStats.getPartsFound()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + } + + @Test + public void someNonexistentPartitions() throws Exception { + String dbName = "default"; + String tableName = "snp"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/default/hit/ds=" + partVals1.get(0)); + Partition part = new Partition(partVals1, dbName, tableName, (int) now, (int) now, psd, + Collections.emptyMap()); + store.addPartition(part); + + ColumnStatistics cs = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); + desc.setLastAnalyzed(now); + desc.setPartName("ds=" + partVals1.get(0)); + cs.setStatsDesc(desc); + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName("col1"); + obj.setColType("double"); + ColumnStatisticsData data = new ColumnStatisticsData(); + DoubleColumnStatsData dcsd = new DoubleColumnStatsData(); + dcsd.setHighValue(1000.2342343); + dcsd.setLowValue(-20.1234213423); + dcsd.setNumNulls(30); + dcsd.setNumDVs(12342); + 
data.setDoubleStats(dcsd); + obj.setStatsData(data); + cs.addToStatsObj(obj); + + store.updatePartitionColumnStatistics(cs, partVals1); + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(1, aggrStats.getPartsFound()); + Assert.assertEquals(1, aggrStats.getColStatsSize()); + ColumnStatisticsObj cso = aggrStats.getColStats().get(0); + Assert.assertEquals("col1", cso.getColName()); + Assert.assertEquals("double", cso.getColType()); + DoubleColumnStatsData dcsd = cso.getStatsData().getDoubleStats(); + Assert.assertEquals(1000.23, dcsd.getHighValue(), 0.01); + Assert.assertEquals(-20.12, dcsd.getLowValue(), 0.01); + Assert.assertEquals(30, dcsd.getNumNulls()); + Assert.assertEquals(12342, dcsd.getNumDVs()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + + // Call again, this time it should come from memory. Also, reverse the name order this time + // to assure that we still hit. + aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=yesterday", "ds=today"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(2, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + } + + @Test + public void nonexistentPartitions() throws Exception { + String dbName = "default"; + String tableName = "nep"; + List partVals1 = Arrays.asList("today"); + List partVals2 = Arrays.asList("yesterday"); + long now = System.currentTimeMillis(); + + List cols = new ArrayList<>(); + cols.add(new FieldSchema("col1", "boolean", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, Collections.emptyMap()); + List partCols = new ArrayList<>(); + partCols.add(new FieldSchema("ds", "string", "")); + Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, + Collections.emptyMap(), null, null, null); + store.createTable(table); + + Checker statChecker = new Checker() { + @Override + public void checkStats(AggrStats aggrStats) throws Exception { + Assert.assertEquals(0, aggrStats.getPartsFound()); + } + }; + + AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, + Arrays.asList("ds=today", "ds=yesterday"), Arrays.asList("col1")); + statChecker.checkStats(aggrStats); + + // Check that we had to build it from the stats + Assert.assertEquals(0, store.backdoor().getStatsCache().hbaseHits.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().totalGets.getCnt()); + Assert.assertEquals(1, store.backdoor().getStatsCache().misses.getCnt()); + } + // TODO test invalidation +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java new file mode 100644 index 0000000..06884b3 --- /dev/null +++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseFilterPlanUtil.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hive.metastore.PartFilterExprUtil; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.FilterPlan; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.MultiScanPlan; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan.ScanMarker; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.primitives.Shorts; + +public class TestHBaseFilterPlanUtil { + final boolean INCLUSIVE = true; + + /** + * Test the function that compares byte arrays + */ + @Test + public void testCompare() { + + Assert.assertEquals(-1, HBaseFilterPlanUtil.compare(new byte[] { 1, 2 }, new byte[] { 1, 3 })); + Assert.assertEquals(-1, + HBaseFilterPlanUtil.compare(new byte[] { 1, 2, 3 }, new byte[] { 1, 3 })); + Assert.assertEquals(-1, + HBaseFilterPlanUtil.compare(new byte[] { 1, 2 }, new byte[] { 1, 2, 3 })); + + Assert.assertEquals(0, HBaseFilterPlanUtil.compare(new byte[] { 3, 2 }, new byte[] { 3, 2 })); + + Assert + .assertEquals(1, HBaseFilterPlanUtil.compare(new byte[] { 3, 2, 1 }, new byte[] { 3, 2 })); + Assert + .assertEquals(1, HBaseFilterPlanUtil.compare(new byte[] { 3, 3, 1 }, new byte[] { 3, 2 })); + + } + + /** + * Test function that finds greater/lesser marker + */ + @Test + public void testgetComparedMarker() { + ScanMarker l; + ScanMarker r; + + // equal plans + l = new ScanMarker("1", INCLUSIVE, "int"); + r = new ScanMarker("1", INCLUSIVE, "int"); + assertFirstGreater(l, r); + + l = new ScanMarker("1", !INCLUSIVE, "int"); + r = new ScanMarker("1", !INCLUSIVE, "int"); + assertFirstGreater(l, r); + + assertFirstGreater(null, null); + + // create l is greater because of inclusive flag + l = new ScanMarker("1", !INCLUSIVE, "int"); + // the rule for null 
vs non-null is different + // non-null is both smaller and greater than null + Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, true)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, true)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(l, null, false)); + Assert.assertEquals(l, ScanPlan.getComparedMarker(null, l, false)); + + // create l that is greater because of the bytes + l = new ScanMarker("2", INCLUSIVE, "int"); + r = new ScanMarker("1", INCLUSIVE, "int"); + assertFirstGreater(l, r); + + } + + private void assertFirstGreater(ScanMarker big, ScanMarker small) { + Assert.assertEquals(big, ScanPlan.getComparedMarker(big, small, true)); + Assert.assertEquals(big, ScanPlan.getComparedMarker(small, big, true)); + Assert.assertEquals(small, ScanPlan.getComparedMarker(big, small, false)); + Assert.assertEquals(small, ScanPlan.getComparedMarker(small, big, false)); + } + + /** + * Test ScanPlan AND operation + */ + @Test + public void testScanPlanAnd() { + ScanPlan l = new ScanPlan(); + ScanPlan r = new ScanPlan(); + l.setStartMarker("a", "int", "10", INCLUSIVE); + r.setStartMarker("a", "int", "10", INCLUSIVE); + + ScanPlan res; + // both equal + res = l.and(r).getPlans().get(0); + Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker); + + // add equal end markers as well, and test AND again + l.setEndMarker("a", "int", "20", INCLUSIVE); + r.setEndMarker("a", "int", "20", INCLUSIVE); + res = l.and(r).getPlans().get(0); + Assert.assertEquals(new ScanMarker("10", INCLUSIVE, "int"), res.markers.get("a").startMarker); + Assert.assertEquals(new ScanMarker("20", INCLUSIVE, "int"), res.markers.get("a").endMarker); + + l.setStartMarker("a", "int", "10", !INCLUSIVE); + l.setEndMarker("a", "int", "20", INCLUSIVE); + + r.setStartMarker("a", "int", "10", INCLUSIVE); + r.setEndMarker("a", "int", "15", INCLUSIVE); + res = l.and(r).getPlans().get(0); + // start of l is greater, end of r is smaller + Assert.assertEquals(l.markers.get("a").startMarker, res.markers.get("a").startMarker); + Assert.assertEquals(r.markers.get("a").endMarker, res.markers.get("a").endMarker); + + } + + /** + * Test ScanPlan OR operation + */ + @Test + public void testScanPlanOr() { + ScanPlan l = new ScanPlan(); + ScanPlan r = new ScanPlan(); + l.setStartMarker("a", "int", "1", INCLUSIVE); + r.setStartMarker("a", "int", "11", INCLUSIVE); + + FilterPlan res1 = l.or(r); + Assert.assertEquals(2, res1.getPlans().size()); + res1.getPlans().get(0).markers.get("a").startMarker.equals(l.markers.get("a").startMarker); + res1.getPlans().get(1).markers.get("a").startMarker.equals(r.markers.get("a").startMarker); + + FilterPlan res2 = res1.or(r); + Assert.assertEquals(3, res2.getPlans().size()); + } + + /** + * Test MultiScanPlan OR + */ + @Test + public void testMultiScanPlanOr() { + + MultiScanPlan l = createMultiScanPlan(new ScanPlan()); + MultiScanPlan r = createMultiScanPlan(new ScanPlan()); + // verify OR of two multi plans with one plan each + Assert.assertEquals(2, l.or(r).getPlans().size()); + + // verify OR of multi plan with a single scanplan + Assert.assertEquals(2, l.or(new ScanPlan()).getPlans().size()); + Assert.assertEquals(2, (new ScanPlan()).or(l).getPlans().size()); + + // verify or of two multiplans with more than one scan plan + r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); + Assert.assertEquals(3, l.or(r).getPlans().size()); + Assert.assertEquals(3, r.or(l).getPlans().size()); + + } + + private MultiScanPlan createMultiScanPlan(ScanPlan... 
scanPlans) { + return new MultiScanPlan(Arrays.asList(scanPlans)); + } + + /** + * Test MultiScanPlan AND + */ + @Test + public void testMultiScanPlanAnd() { + MultiScanPlan l = createMultiScanPlan(new ScanPlan()); + MultiScanPlan r = createMultiScanPlan(new ScanPlan()); + + // two MultiScanPlan with single scan plans should result in new FilterPlan + // with just one scan + Assert.assertEquals(1, l.and(r).getPlans().size()); + + // l has one ScanPlan, r has two. AND result should have two + r = createMultiScanPlan(new ScanPlan(), new ScanPlan()); + Assert.assertEquals(2, l.and(r).getPlans().size()); + Assert.assertEquals(2, r.and(l).getPlans().size()); + + // l has 2 ScanPlans, r has 3. AND result should have 6 + l = createMultiScanPlan(new ScanPlan(), new ScanPlan()); + r = createMultiScanPlan(new ScanPlan(), new ScanPlan(), new ScanPlan()); + Assert.assertEquals(6, l.and(r).getPlans().size()); + Assert.assertEquals(6, r.and(l).getPlans().size()); + } + + /** + * Test plan generation from LeafNode + * + * @throws MetaException + */ + @Test + public void testLeafNodePlan() throws MetaException { + + final String KEY = "k1"; + final String VAL = "v1"; + final String OTHERKEY = "k2"; + LeafNode l = new LeafNode(); + l.keyName = KEY; + l.value = VAL; + final ScanMarker DEFAULT_SCANMARKER = null; + List parts = new ArrayList(); + parts.add(new FieldSchema(KEY, "int", null)); + parts.add(new FieldSchema(OTHERKEY, "int", null)); + + l.operator = Operator.EQUALS; + verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), new ScanMarker(VAL, INCLUSIVE, "int")); + + l.operator = Operator.GREATERTHAN; + verifyPlan(l, parts, KEY, new ScanMarker(VAL, !INCLUSIVE, "int"), DEFAULT_SCANMARKER); + + l.operator = Operator.GREATERTHANOREQUALTO; + verifyPlan(l, parts, KEY, new ScanMarker(VAL, INCLUSIVE, "int"), DEFAULT_SCANMARKER); + + l.operator = Operator.LESSTHAN; + verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, !INCLUSIVE, "int")); + + l.operator = Operator.LESSTHANOREQUALTO; + verifyPlan(l, parts, KEY, DEFAULT_SCANMARKER, new ScanMarker(VAL, INCLUSIVE, "int")); + + // following leaf node plans should currently have true for 'has unsupported condition', + // because of the condition is not on first key + l.operator = Operator.EQUALS; + verifyPlan(l, parts, OTHERKEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, false); + + // if tree is null, it should return equivalent of full scan, and true + // for 'has unsupported condition' + verifyPlan(null, parts, KEY, DEFAULT_SCANMARKER, DEFAULT_SCANMARKER, true); + + } + + private void verifyPlan(TreeNode l, List parts, String keyName, ScanMarker startMarker, ScanMarker endMarker) + throws MetaException { + verifyPlan(l, parts, keyName, startMarker, endMarker, false); + } + + private void verifyPlan(TreeNode l, List parts, String keyName, ScanMarker startMarker, ScanMarker endMarker, + boolean hasUnsupportedCondition) throws MetaException { + ExpressionTree e = null; + if (l != null) { + e = new ExpressionTree(); + e.setRootForTest(l); + } + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + FilterPlan plan = planRes.plan; + Assert.assertEquals("Has unsupported condition", hasUnsupportedCondition, + planRes.hasUnsupportedCondition); + Assert.assertEquals(1, plan.getPlans().size()); + ScanPlan splan = plan.getPlans().get(0); + if (startMarker != null) { + Assert.assertEquals(startMarker, splan.markers.get(keyName).startMarker); + } else { + Assert.assertTrue(splan.markers.get(keyName)==null || + 
splan.markers.get(keyName).startMarker==null); + } + if (endMarker != null) { + Assert.assertEquals(endMarker, splan.markers.get(keyName).endMarker); + } else { + Assert.assertTrue(splan.markers.get(keyName)==null || + splan.markers.get(keyName).endMarker==null); + } + } + + /** + * Test plan generation from TreeNode + * + * @throws MetaException + */ + @Test + public void testTreeNodePlan() throws MetaException { + + final String KEY = "k1"; + final String VAL1 = "10"; + final String VAL2 = "11"; + LeafNode l = new LeafNode(); + l.keyName = KEY; + l.value = VAL1; + final ScanMarker DEFAULT_SCANMARKER = null; + + List parts = new ArrayList(); + parts.add(new FieldSchema("k1", "int", null)); + + LeafNode r = new LeafNode(); + r.keyName = KEY; + r.value = VAL2; + + TreeNode tn = new TreeNode(l, LogicalOperator.AND, r); + + // verify plan for - k1 >= '10' and k1 < '11' + l.operator = Operator.GREATERTHANOREQUALTO; + r.operator = Operator.LESSTHAN; + verifyPlan(tn, parts, KEY, new ScanMarker(VAL1, INCLUSIVE, "int"), new ScanMarker(VAL2, + !INCLUSIVE, "int")); + + // verify plan for - k1 >= '10' and k1 > '11' + l.operator = Operator.GREATERTHANOREQUALTO; + r.operator = Operator.GREATERTHAN; + verifyPlan(tn, parts, KEY, new ScanMarker(VAL2, !INCLUSIVE, "int"), DEFAULT_SCANMARKER); + + // verify plan for - k1 >= '10' or k1 > '11' + tn = new TreeNode(l, LogicalOperator.OR, r); + ExpressionTree e = new ExpressionTree(); + e.setRootForTest(tn); + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + Assert.assertEquals(2, planRes.plan.getPlans().size()); + Assert.assertEquals(false, planRes.hasUnsupportedCondition); + + // verify plan for - k1 >= '10' and (k1 >= '10' or k1 > '11') + TreeNode tn2 = new TreeNode(l, LogicalOperator.AND, tn); + e = new ExpressionTree(); + e.setRootForTest(tn2); + planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + Assert.assertEquals(2, planRes.plan.getPlans().size()); + Assert.assertEquals(false, planRes.hasUnsupportedCondition); + + // verify plan for (k1 >= '10' and (k1 >= '10' or k1 > '11')) or k1 LIKE '2' + // plan should return true for hasUnsupportedCondition + LeafNode klike = new LeafNode(); + klike.keyName = KEY; + klike.value = VAL1; + klike.operator = Operator.LIKE; + TreeNode tn3 = new TreeNode(tn2, LogicalOperator.OR, klike); + e = new ExpressionTree(); + e.setRootForTest(tn3); + planRes = HBaseFilterPlanUtil.getFilterPlan(e, parts); + Assert.assertEquals(3, planRes.plan.getPlans().size()); + Assert.assertEquals(false, planRes.hasUnsupportedCondition); + + + } + + @Test + public void testPartitionKeyScannerAllString() throws Exception { + List parts = new ArrayList(); + parts.add(new FieldSchema("year", "string", null)); + parts.add(new FieldSchema("month", "string", null)); + parts.add(new FieldSchema("state", "string", null)); + + // One prefix key and one minor key range + ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + + Assert.assertEquals(planRes.plan.getPlans().size(), 1); + + ScanPlan sp = planRes.plan.getPlans().get(0); + byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + RowFilter filter = (RowFilter)sp.getFilter(parts); + + // scan range contains the major key year, rowfilter contains minor key state + Assert.assertTrue(Bytes.contains(startRowSuffix, "2015".getBytes())); + 
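+    // Illustrative sketch only (dbTablePrefix and the exact scan-building code below are
+    // assumptions, not part of this test): a caller turning this ScanPlan into an HBase scan
+    // would do roughly
+    //   Scan scan = new Scan();
+    //   scan.setStartRow(Bytes.add(dbTablePrefix, sp.getStartRowSuffix("testdb", "testtb", parts)));
+    //   scan.setStopRow(Bytes.add(dbTablePrefix, sp.getEndRowSuffix("testdb", "testtb", parts)));
+    //   scan.setFilter(sp.getFilter(parts)); // RowFilter wrapping PartitionKeyComparator on "state"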
Assert.assertTrue(Bytes.contains(endRowSuffix, "2015".getBytes())); + Assert.assertFalse(Bytes.contains(startRowSuffix, "CA".getBytes())); + Assert.assertFalse(Bytes.contains(endRowSuffix, "CA".getBytes())); + + PartitionKeyComparator comparator = (PartitionKeyComparator)filter.getComparator(); + Assert.assertEquals(comparator.ranges.size(), 1); + Assert.assertEquals(comparator.ranges.get(0).keyName, "state"); + + // Two prefix key and one LIKE operator + exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and month > 10 " + + "and month <= 11 and state like 'C%'").tree; + planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + + Assert.assertEquals(planRes.plan.getPlans().size(), 1); + + sp = planRes.plan.getPlans().get(0); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + filter = (RowFilter)sp.getFilter(parts); + + // scan range contains the major key value year/month, rowfilter contains LIKE operator + Assert.assertTrue(Bytes.contains(startRowSuffix, "2015".getBytes())); + Assert.assertTrue(Bytes.contains(endRowSuffix, "2015".getBytes())); + Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes())); + Assert.assertTrue(Bytes.contains(endRowSuffix, "11".getBytes())); + + comparator = (PartitionKeyComparator)filter.getComparator(); + Assert.assertEquals(comparator.ops.size(), 1); + Assert.assertEquals(comparator.ops.get(0).keyName, "state"); + + // One prefix key, one minor key range and one LIKE operator + exprTree = PartFilterExprUtil.getFilterParser("year >= 2014 and month > 10 " + + "and month <= 11 and state like 'C%'").tree; + planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + + Assert.assertEquals(planRes.plan.getPlans().size(), 1); + + sp = planRes.plan.getPlans().get(0); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + filter = (RowFilter)sp.getFilter(parts); + + // scan range contains the major key value year (low bound), rowfilter contains minor key state + // and LIKE operator + Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); + + comparator = (PartitionKeyComparator)filter.getComparator(); + Assert.assertEquals(comparator.ranges.size(), 1); + Assert.assertEquals(comparator.ranges.get(0).keyName, "month"); + Assert.assertEquals(comparator.ops.size(), 1); + Assert.assertEquals(comparator.ops.get(0).keyName, "state"); + + // Condition contains or + exprTree = PartFilterExprUtil.getFilterParser("year = 2014 and (month > 10 " + + "or month < 3)").tree; + planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + + sp = planRes.plan.getPlans().get(0); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + filter = (RowFilter)sp.getFilter(parts); + + // The first ScanPlan contains year = 2014 and month > 10 + Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); + Assert.assertTrue(Bytes.contains(endRowSuffix, "2014".getBytes())); + Assert.assertTrue(Bytes.contains(startRowSuffix, "10".getBytes())); + + sp = planRes.plan.getPlans().get(1); + startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + filter = (RowFilter)sp.getFilter(parts); + + // The first ScanPlan contains year = 2014 and month < 3 + Assert.assertTrue(Bytes.contains(startRowSuffix, "2014".getBytes())); + 
Assert.assertTrue(Bytes.contains(endRowSuffix, "2014".getBytes())); + Assert.assertTrue(Bytes.contains(endRowSuffix, "3".getBytes())); + } + + @Test + public void testPartitionKeyScannerMixedType() throws Exception { + List parts = new ArrayList(); + parts.add(new FieldSchema("year", "int", null)); + parts.add(new FieldSchema("month", "int", null)); + parts.add(new FieldSchema("state", "string", null)); + + // One prefix key and one minor key range + ExpressionTree exprTree = PartFilterExprUtil.getFilterParser("year = 2015 and state = 'CA'").tree; + PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, parts); + + Assert.assertEquals(planRes.plan.getPlans().size(), 1); + + ScanPlan sp = planRes.plan.getPlans().get(0); + byte[] startRowSuffix = sp.getStartRowSuffix("testdb", "testtb", parts); + byte[] endRowSuffix = sp.getEndRowSuffix("testdb", "testtb", parts); + RowFilter filter = (RowFilter)sp.getFilter(parts); + + // scan range contains the major key year, rowfilter contains minor key state + Assert.assertTrue(Bytes.contains(startRowSuffix, Shorts.toByteArray((short)2015))); + Assert.assertTrue(Bytes.contains(endRowSuffix, Shorts.toByteArray((short)2016))); + + PartitionKeyComparator comparator = (PartitionKeyComparator)filter.getComparator(); + Assert.assertEquals(comparator.ranges.size(), 1); + Assert.assertEquals(comparator.ranges.get(0).keyName, "state"); + } +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java new file mode 100644 index 0000000..b6dfcf3 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -0,0 +1,1307 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.io.IOException; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * + */ +public class TestHBaseStore { + private static final Log LOG = LogFactory.getLog(TestHBaseStore.class.getName()); + static Map emptyParameters = new HashMap(); + // Table with NUM_PART_KEYS partitioning keys and NUM_PARTITIONS values per key + static final int NUM_PART_KEYS = 1; + static final int NUM_PARTITIONS = 5; + static final String DB = "db"; + static final String TBL = "tbl"; + static final String COL = "col"; + static final String PART_KEY_PREFIX = "part"; + static final String PART_VAL_PREFIX = "val"; + static final String PART_KV_SEPARATOR = "="; + static final List PART_KEYS = new ArrayList(); + static final List PART_VALS = new ArrayList(); + // Initialize mock partitions + static { + for (int i = 1; i <= NUM_PART_KEYS; i++) { + PART_KEYS.add(PART_KEY_PREFIX + i); + } + for (int i = 1; i <= NUM_PARTITIONS; i++) { + PART_VALS.add(PART_VAL_PREFIX + i); + } + } + static final long DEFAULT_TIME = System.currentTimeMillis(); + static final String PART_KEY = "part"; + static final String BOOLEAN_COL = "boolCol"; + static final String BOOLEAN_TYPE = "boolean"; + static final String BOOLEAN_VAL = "true"; + static final 
String LONG_COL = "longCol"; + static final String LONG_TYPE = "long"; + static final String INT_TYPE = "int"; + static final String INT_VAL = "1234"; + static final String DOUBLE_COL = "doubleCol"; + static final String DOUBLE_TYPE = "double"; + static final String DOUBLE_VAL = "3.1415"; + static final String STRING_COL = "stringCol"; + static final String STRING_TYPE = "string"; + static final String STRING_VAL = "stringval"; + static final String BINARY_COL = "binaryCol"; + static final String BINARY_TYPE = "binary"; + static final String BINARY_VAL = "1"; + static final String DECIMAL_COL = "decimalCol"; + static final String DECIMAL_TYPE = "decimal(5,3)"; + static final String DECIMAL_VAL = "12.123"; + static List booleanColStatsObjs = new ArrayList( + NUM_PARTITIONS); + static List longColStatsObjs = new ArrayList( + NUM_PARTITIONS); + static List doubleColStatsObjs = new ArrayList( + NUM_PARTITIONS); + static List stringColStatsObjs = new ArrayList( + NUM_PARTITIONS); + static List binaryColStatsObjs = new ArrayList( + NUM_PARTITIONS); + static List decimalColStatsObjs = new ArrayList( + NUM_PARTITIONS); + + @Rule public ExpectedException thrown = ExpectedException.none(); + @Mock HTableInterface htable; + SortedMap rows = new TreeMap<>(); + HBaseStore store; + + + @BeforeClass + public static void beforeTest() { + // All data intitializations + populateMockStats(); + } + + private static void populateMockStats() { + ColumnStatisticsObj statsObj; + // Add NUM_PARTITIONS ColumnStatisticsObj of each type + // For aggregate stats test, we'll treat each ColumnStatisticsObj as stats for 1 partition + // For the rest, we'll just pick the 1st ColumnStatisticsObj from this list and use it + for (int i = 0; i < NUM_PARTITIONS; i++) { + statsObj = mockBooleanStats(i); + booleanColStatsObjs.add(statsObj); + statsObj = mockLongStats(i); + longColStatsObjs.add(statsObj); + statsObj = mockDoubleStats(i); + doubleColStatsObjs.add(statsObj); + statsObj = mockStringStats(i); + stringColStatsObjs.add(statsObj); + statsObj = mockBinaryStats(i); + binaryColStatsObjs.add(statsObj); + statsObj = mockDecimalStats(i); + decimalColStatsObjs.add(statsObj); + } + } + + private static ColumnStatisticsObj mockBooleanStats(int i) { + long trues = 37 + 100*i; + long falses = 12 + 50*i; + long nulls = 2 + i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(BOOLEAN_COL); + colStatsObj.setColType(BOOLEAN_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(trues); + boolData.setNumFalses(falses); + boolData.setNumNulls(nulls); + data.setBooleanStats(boolData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + private static ColumnStatisticsObj mockLongStats(int i) { + long high = 120938479124L + 100*i; + long low = -12341243213412124L - 50*i; + long nulls = 23 + i; + long dVs = 213L + 10*i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(LONG_COL); + colStatsObj.setColType(LONG_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + LongColumnStatsData longData = new LongColumnStatsData(); + longData.setHighValue(high); + longData.setLowValue(low); + longData.setNumNulls(nulls); + longData.setNumDVs(dVs); + data.setLongStats(longData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + private static ColumnStatisticsObj mockDoubleStats(int i) { + double high = 123423.23423 + 100*i; + double low = 
0.00001234233 - 50*i; + long nulls = 92 + i; + long dVs = 1234123421L + 10*i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(DOUBLE_COL); + colStatsObj.setColType(DOUBLE_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(); + doubleData.setHighValue(high); + doubleData.setLowValue(low); + doubleData.setNumNulls(nulls); + doubleData.setNumDVs(dVs); + data.setDoubleStats(doubleData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + private static ColumnStatisticsObj mockStringStats(int i) { + long maxLen = 1234 + 10*i; + double avgLen = 32.3 + i; + long nulls = 987 + 10*i; + long dVs = 906 + i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(STRING_COL); + colStatsObj.setColType(STRING_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + StringColumnStatsData stringData = new StringColumnStatsData(); + stringData.setMaxColLen(maxLen); + stringData.setAvgColLen(avgLen); + stringData.setNumNulls(nulls); + stringData.setNumDVs(dVs); + data.setStringStats(stringData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + private static ColumnStatisticsObj mockBinaryStats(int i) {; + long maxLen = 123412987L + 10*i; + double avgLen = 76.98 + i; + long nulls = 976998797L + 10*i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(BINARY_COL); + colStatsObj.setColType(BINARY_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + BinaryColumnStatsData binaryData = new BinaryColumnStatsData(); + binaryData.setMaxColLen(maxLen); + binaryData.setAvgColLen(avgLen); + binaryData.setNumNulls(nulls); + data.setBinaryStats(binaryData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + private static ColumnStatisticsObj mockDecimalStats(int i) { + Decimal high = new Decimal(); + high.setScale((short)3); + String strHigh = String.valueOf(3876 + 100*i); + high.setUnscaled(strHigh.getBytes()); + Decimal low = new Decimal(); + low.setScale((short)3); + String strLow = String.valueOf(38 + i); + low.setUnscaled(strLow.getBytes()); + long nulls = 13 + i; + long dVs = 923947293L + 100*i; + ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(); + colStatsObj.setColName(DECIMAL_COL); + colStatsObj.setColType(DECIMAL_TYPE); + ColumnStatisticsData data = new ColumnStatisticsData(); + DecimalColumnStatsData decimalData = new DecimalColumnStatsData(); + decimalData.setHighValue(high); + decimalData.setLowValue(low); + decimalData.setNumNulls(nulls); + decimalData.setNumDVs(dVs); + data.setDecimalStats(decimalData); + colStatsObj.setStatsData(data); + return colStatsObj; + } + + @AfterClass + public static void afterTest() { + } + + + @Before + public void init() throws IOException { + MockitoAnnotations.initMocks(this); + HiveConf conf = new HiveConf(); + conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true); + store = MockUtils.init(conf, htable, rows); + } + + @Test + public void createDb() throws Exception { + String dbname = "mydb"; + Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + + Database d = store.getDatabase(dbname); + Assert.assertEquals(dbname, d.getName()); + Assert.assertEquals("no description", d.getDescription()); + Assert.assertEquals("file:///tmp", d.getLocationUri()); + } + + @Test + public void alterDb() throws Exception { + String dbname = "mydb"; + Database db = new 
Database(dbname, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + db.setDescription("a description"); + store.alterDatabase(dbname, db); + + Database d = store.getDatabase(dbname); + Assert.assertEquals(dbname, d.getName()); + Assert.assertEquals("a description", d.getDescription()); + Assert.assertEquals("file:///tmp", d.getLocationUri()); + } + + @Test + public void dropDb() throws Exception { + String dbname = "anotherdb"; + Database db = new Database(dbname, "no description", "file:///tmp", emptyParameters); + store.createDatabase(db); + + Database d = store.getDatabase(dbname); + Assert.assertNotNull(d); + + store.dropDatabase(dbname); + thrown.expect(NoSuchObjectException.class); + store.getDatabase(dbname); + } + + @Test + public void createFunction() throws Exception { + String funcName = "createfunc"; + int now = (int)(System.currentTimeMillis()/ 1000); + Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, + now, FunctionType.JAVA, Arrays.asList(new ResourceUri(ResourceType.JAR, + "file:/tmp/somewhere"))); + store.createFunction(func); + + Function f = store.getFunction(DB, funcName); + Assert.assertEquals(DB, f.getDbName()); + Assert.assertEquals(funcName, f.getFunctionName()); + Assert.assertEquals("o.a.h.h.myfunc", f.getClassName()); + Assert.assertEquals("me", f.getOwnerName()); + Assert.assertEquals(PrincipalType.USER, f.getOwnerType()); + Assert.assertTrue(now <= f.getCreateTime()); + Assert.assertEquals(FunctionType.JAVA, f.getFunctionType()); + Assert.assertEquals(1, f.getResourceUrisSize()); + Assert.assertEquals(ResourceType.JAR, f.getResourceUris().get(0).getResourceType()); + Assert.assertEquals("file:/tmp/somewhere", f.getResourceUris().get(0).getUri()); + } + + @Test + public void alterFunction() throws Exception { + String funcName = "alterfunc"; + int now = (int)(System.currentTimeMillis()/ 1000); + List uris = new ArrayList(); + uris.add(new ResourceUri(ResourceType.FILE, "whatever")); + Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, + now, FunctionType.JAVA, uris); + store.createFunction(func); + + Function f = store.getFunction(DB, funcName); + Assert.assertEquals(ResourceType.FILE, f.getResourceUris().get(0).getResourceType()); + + func.addToResourceUris(new ResourceUri(ResourceType.ARCHIVE, "file")); + store.alterFunction(DB, funcName, func); + + f = store.getFunction(DB, funcName); + Assert.assertEquals(2, f.getResourceUrisSize()); + Assert.assertEquals(ResourceType.FILE, f.getResourceUris().get(0).getResourceType()); + Assert.assertEquals(ResourceType.ARCHIVE, f.getResourceUris().get(1).getResourceType()); + + } + + @Test + public void dropFunction() throws Exception { + String funcName = "delfunc"; + int now = (int)(System.currentTimeMillis()/ 1000); + Function func = new Function(funcName, DB, "o.a.h.h.myfunc", "me", PrincipalType.USER, + now, FunctionType.JAVA, Arrays.asList(new ResourceUri(ResourceType.JAR, "file:/tmp/somewhere"))); + store.createFunction(func); + + Function f = store.getFunction(DB, funcName); + Assert.assertNotNull(f); + + store.dropFunction(DB, funcName); + //thrown.expect(NoSuchObjectException.class); + Assert.assertNull(store.getFunction(DB, funcName)); + } + + @Test + public void createTable() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", 
"seriallib", null); + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertFalse(t.getSd().isCompressed()); + Assert.assertEquals(17, t.getSd().getNumBuckets()); + Assert.assertEquals(1, t.getSd().getBucketColsSize()); + Assert.assertEquals("bucketcol", t.getSd().getBucketCols().get(0)); + Assert.assertEquals(1, t.getSd().getSortColsSize()); + Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol()); + Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder()); + Assert.assertEquals(1, t.getSd().getParametersSize()); + Assert.assertEquals("value", t.getSd().getParameters().get("key")); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(0, t.getParametersSize()); + } + + @Test + public void skewInfo() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, + serde, null, null, emptyParameters); + + Map, String> map = new HashMap, String>(); + map.put(Arrays.asList("col3"), "col4"); + SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), + map); + sd.setSkewedInfo(skew); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertTrue(t.getSd().isCompressed()); + Assert.assertEquals(0, t.getSd().getNumBuckets()); + Assert.assertEquals(0, t.getSd().getSortColsSize()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + 
Assert.assertEquals(0, t.getParametersSize()); + + skew = t.getSd().getSkewedInfo(); + Assert.assertNotNull(skew); + Assert.assertEquals(1, skew.getSkewedColNamesSize()); + Assert.assertEquals("col1", skew.getSkewedColNames().get(0)); + Assert.assertEquals(1, skew.getSkewedColValuesSize()); + Assert.assertEquals("col2", skew.getSkewedColValues().get(0).get(0)); + Assert.assertEquals(1, skew.getSkewedColValueLocationMapsSize()); + Assert.assertEquals("col4", skew.getSkewedColValueLocationMaps().get(Arrays.asList("col3"))); + + } + + @Test + public void hashSd() throws Exception { + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", true, 0, + serde, null, null, emptyParameters); + + Map, String> map = new HashMap, String>(); + map.put(Arrays.asList("col3"), "col4"); + SkewedInfo skew = new SkewedInfo(Arrays.asList("col1"), Arrays.asList(Arrays.asList("col2")), + map); + sd.setSkewedInfo(skew); + + MessageDigest md = MessageDigest.getInstance("MD5"); + byte[] baseHash = HBaseUtils.hashStorageDescriptor(sd, md); + + StorageDescriptor changeSchema = new StorageDescriptor(sd); + changeSchema.getCols().add(new FieldSchema("col2", "varchar(32)", "a comment")); + byte[] schemaHash = HBaseUtils.hashStorageDescriptor(changeSchema, md); + Assert.assertFalse(Arrays.equals(baseHash, schemaHash)); + + StorageDescriptor changeLocation = new StorageDescriptor(sd); + changeLocation.setLocation("file:/somewhere/else"); + byte[] locationHash = HBaseUtils.hashStorageDescriptor(changeLocation, md); + Assert.assertArrayEquals(baseHash, locationHash); + } + + @Test + public void alterTable() throws Exception { + String tableName = "alttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + startTime += 10; + table.setLastAccessTime(startTime); + store.alterTable("default", tableName, table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(startTime, t.getLastAccessTime()); + } + + @Test + public void dropTable() throws Exception { + String tableName = "dtable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new 
SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertNotNull(t); + + store.dropTable("default", tableName); + Assert.assertNull(store.getTable("default", tableName)); + } + + @Test + public void createPartition() throws Exception { + String tableName = "myparttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(DB, tableName, vals); + Assert.assertEquals(1, p.getSd().getColsSize()); + Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); + Assert.assertEquals("input", p.getSd().getInputFormat()); + Assert.assertEquals("output", p.getSd().getOutputFormat()); + Assert.assertEquals(DB, p.getDbName()); + Assert.assertEquals(tableName, p.getTableName()); + Assert.assertEquals(1, p.getValuesSize()); + Assert.assertEquals("fred", p.getValues().get(0)); + + Assert.assertTrue(store.doesPartitionExist(DB, tableName, vals)); + Assert.assertFalse(store.doesPartitionExist(DB, tableName, Arrays.asList("bob"))); + } + + @Test + public void alterPartition() throws Exception { + String tableName = "alterparttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + part.setLastAccessTime(startTime + 10); + store.alterPartition(DB, tableName, vals, part); + + Partition p = store.getPartition(DB, 
tableName, vals); + Assert.assertEquals(1, p.getSd().getColsSize()); + Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); + Assert.assertEquals("input", p.getSd().getInputFormat()); + Assert.assertEquals("output", p.getSd().getOutputFormat()); + Assert.assertEquals(DB, p.getDbName()); + Assert.assertEquals(tableName, p.getTableName()); + Assert.assertEquals(1, p.getValuesSize()); + Assert.assertEquals("fred", p.getValues().get(0)); + Assert.assertEquals(startTime + 10, p.getLastAccessTime()); + + Assert.assertTrue(store.doesPartitionExist(DB, tableName, vals)); + Assert.assertFalse(store.doesPartitionExist(DB, tableName, Arrays.asList("bob"))); + } + + @Test + public void getPartitions() throws Exception { + String tableName = "manyParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + for (String val : partVals) { + List vals = new ArrayList(); + vals.add(val); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(DB, tableName, vals); + Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); + } + + List parts = store.getPartitions(DB, tableName, -1); + Assert.assertEquals(5, parts.size()); + String[] pv = new String[5]; + for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); + Arrays.sort(pv); + Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); + } + + @Test + public void listGetDropPartitionNames() throws Exception { + String tableName = "listParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + partCols.add(new FieldSchema("region", "string", "")); + Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; + for (String[] pv : partVals) { + List vals = new ArrayList(); + for (String v : pv) vals.add(v); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + pv[0] + 
"/region=" + pv[1]); + Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + } + + List names = store.listPartitionNames(DB, tableName, (short) -1); + Assert.assertEquals(2, names.size()); + String[] resultNames = names.toArray(new String[names.size()]); + Arrays.sort(resultNames); + Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", + "pc=tomorrow/region=europe"}); + + List parts = store.getPartitionsByNames(DB, tableName, names); + Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); + Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); + + store.dropPartitions(DB, tableName, names); + List afterDropParts = store.getPartitions(DB, tableName, -1); + Assert.assertEquals(0, afterDropParts.size()); + } + + + @Test + public void dropPartition() throws Exception { + String tableName = "myparttable2"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Assert.assertNotNull(store.getPartition(DB, tableName, vals)); + store.dropPartition(DB, tableName, vals); + thrown.expect(NoSuchObjectException.class); + store.getPartition(DB, tableName, vals); + } + + @Test + public void createRole() throws Exception { + int now = (int)System.currentTimeMillis()/1000; + String roleName = "myrole"; + store.addRole(roleName, "me"); + + Role r = store.getRole(roleName); + Assert.assertEquals(roleName, r.getRoleName()); + Assert.assertEquals("me", r.getOwnerName()); + Assert.assertTrue(now <= r.getCreateTime()); + } + + @Test + public void dropRole() throws Exception { + String roleName = "anotherrole"; + store.addRole(roleName, "me"); + + Role role = store.getRole(roleName); + Assert.assertNotNull(role); + + store.removeRole(roleName); + thrown.expect(NoSuchObjectException.class); + store.getRole(roleName); + } + + // Due to the way our mock stuff works, we can only insert one column at a time, so we'll test + // each stat type separately. We'll test them together in the integration tests. + @Test + public void booleanTableStatistics() throws Exception { + // Add a boolean table stats for BOOLEAN_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = booleanColStatsObjs.get(0); + BooleanColumnStatsData boolData = obj.getStatsData().getBooleanStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(BOOLEAN_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, dataFromDB.getSetField()); + // Compare BooleanColumnStatsData + BooleanColumnStatsData boolDataFromDB = dataFromDB.getBooleanStats(); + Assert.assertEquals(boolData.getNumTrues(), boolDataFromDB.getNumTrues()); + Assert.assertEquals(boolData.getNumFalses(), boolDataFromDB.getNumFalses()); + Assert.assertEquals(boolData.getNumNulls(), boolDataFromDB.getNumNulls()); + } + + @Test + public void longTableStatistics() throws Exception { + // Add a long table stats for LONG_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = longColStatsObjs.get(0); + LongColumnStatsData longData = obj.getStatsData().getLongStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(LONG_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); + // Compare LongColumnStatsData + LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); + Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); + Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); + Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); + Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); + } + + @Test + public void doubleTableStatistics() throws Exception { + // Add a double table stats for DOUBLE_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = doubleColStatsObjs.get(0); + DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DOUBLE_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); + // Compare DoubleColumnStatsData + DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); + Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); + Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); + Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); + Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); + } + + @Test + public void stringTableStatistics() throws Exception { + // Add a string table stats for STRING_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = stringColStatsObjs.get(0); + StringColumnStatsData stringData = obj.getStatsData().getStringStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(STRING_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); + // Compare StringColumnStatsData + StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); + Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); + Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); + Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); + Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); + } + + @Test + public void binaryTableStatistics() throws Exception { + // Add a binary table stats for BINARY_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = binaryColStatsObjs.get(0); + BinaryColumnStatsData binaryData = obj.getStatsData().getBinaryStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(BINARY_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, dataFromDB.getSetField()); + // Compare BinaryColumnStatsData + BinaryColumnStatsData binaryDataFromDB = dataFromDB.getBinaryStats(); + Assert.assertEquals(binaryData.getMaxColLen(), binaryDataFromDB.getMaxColLen()); + Assert.assertEquals(binaryData.getAvgColLen(), binaryDataFromDB.getAvgColLen(), 0.01); + Assert.assertEquals(binaryData.getNumNulls(), binaryDataFromDB.getNumNulls()); + } + + @Test + public void decimalTableStatistics() throws Exception { + // Add a decimal table stats for DECIMAL_COL to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for table level stats + ColumnStatisticsDesc desc = getMockTblColStatsDesc(); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = decimalColStatsObjs.get(0); + DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); + // Add to DB + stats.addToStatsObj(obj); + store.updateTableColumnStatistics(stats); + // Get from DB + ColumnStatistics statsFromDB = store.getTableColumnStatistics(DB, TBL, Arrays.asList(DECIMAL_COL)); + // Compare ColumnStatisticsDesc + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.getStatsDesc().getTableName()); + Assert.assertTrue(statsFromDB.getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); + // Compare DecimalColumnStatsData + DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); + Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); + Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); + Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); + Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); + } + + @Test + public void booleanPartitionStatistics() throws Exception { + createMockTableAndPartition(BOOLEAN_TYPE, BOOLEAN_VAL); + // Add partition stats for: BOOLEAN_COL and partition: {PART_KEY, BOOLEAN_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
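+    // For reference, a minimal sketch of what getMockPartColStatsDesc(PART_KEY, BOOLEAN_VAL)
+    // (defined elsewhere in this class) is assumed to build; the partition-name format is an
+    // assumption based on PART_KV_SEPARATOR:
+    //   ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false /*isTblLevel*/, DB, TBL);
+    //   desc.setPartName(PART_KEY + PART_KV_SEPARATOR + BOOLEAN_VAL);   // "part=true"
+    //   desc.setLastAnalyzed(DEFAULT_TIME);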
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BOOLEAN_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = booleanColStatsObjs.get(0); + BooleanColumnStatsData boolData = obj.getStatsData().getBooleanStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(BOOLEAN_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, dataFromDB.getSetField()); + // Compare BooleanColumnStatsData + BooleanColumnStatsData boolDataFromDB = dataFromDB.getBooleanStats(); + Assert.assertEquals(boolData.getNumTrues(), boolDataFromDB.getNumTrues()); + Assert.assertEquals(boolData.getNumFalses(), boolDataFromDB.getNumFalses()); + Assert.assertEquals(boolData.getNumNulls(), boolDataFromDB.getNumNulls()); + } + + @Test + public void longPartitionStatistics() throws Exception { + createMockTableAndPartition(INT_TYPE, INT_VAL); + // Add partition stats for: LONG_COL and partition: {PART_KEY, INT_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, INT_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = longColStatsObjs.get(0); + LongColumnStatsData longData = obj.getStatsData().getLongStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(INT_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.LONG_STATS, dataFromDB.getSetField()); + // Compare LongColumnStatsData + LongColumnStatsData longDataFromDB = dataFromDB.getLongStats(); + Assert.assertEquals(longData.getHighValue(), longDataFromDB.getHighValue()); + Assert.assertEquals(longData.getLowValue(), longDataFromDB.getLowValue()); + Assert.assertEquals(longData.getNumNulls(), longDataFromDB.getNumNulls()); + Assert.assertEquals(longData.getNumDVs(), longDataFromDB.getNumDVs()); + } + + @Test + public void doublePartitionStatistics() throws Exception { + createMockTableAndPartition(DOUBLE_TYPE, DOUBLE_VAL); + // Add partition stats for: DOUBLE_COL and partition: {PART_KEY, DOUBLE_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DOUBLE_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = doubleColStatsObjs.get(0); + DoubleColumnStatsData doubleData = obj.getStatsData().getDoubleStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(DOUBLE_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.DOUBLE_STATS, dataFromDB.getSetField()); + // Compare DoubleColumnStatsData + DoubleColumnStatsData doubleDataFromDB = dataFromDB.getDoubleStats(); + Assert.assertEquals(doubleData.getHighValue(), doubleDataFromDB.getHighValue(), 0.01); + Assert.assertEquals(doubleData.getLowValue(), doubleDataFromDB.getLowValue(), 0.01); + Assert.assertEquals(doubleData.getNumNulls(), doubleDataFromDB.getNumNulls()); + Assert.assertEquals(doubleData.getNumDVs(), doubleDataFromDB.getNumDVs()); + } + + @Test + public void stringPartitionStatistics() throws Exception { + createMockTableAndPartition(STRING_TYPE, STRING_VAL); + // Add partition stats for: STRING_COL and partition: {PART_KEY, STRING_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, STRING_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = stringColStatsObjs.get(0); + StringColumnStatsData stringData = obj.getStatsData().getStringStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(STRING_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.STRING_STATS, dataFromDB.getSetField()); + // Compare StringColumnStatsData + StringColumnStatsData stringDataFromDB = dataFromDB.getStringStats(); + Assert.assertEquals(stringData.getMaxColLen(), stringDataFromDB.getMaxColLen()); + Assert.assertEquals(stringData.getAvgColLen(), stringDataFromDB.getAvgColLen(), 0.01); + Assert.assertEquals(stringData.getNumNulls(), stringDataFromDB.getNumNulls()); + Assert.assertEquals(stringData.getNumDVs(), stringDataFromDB.getNumDVs()); + } + + @Test + public void binaryPartitionStatistics() throws Exception { + createMockTableAndPartition(BINARY_TYPE, BINARY_VAL); + // Add partition stats for: BINARY_COL and partition: {PART_KEY, BINARY_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BINARY_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = binaryColStatsObjs.get(0); + BinaryColumnStatsData binaryData = obj.getStatsData().getBinaryStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(BINARY_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.BINARY_STATS, dataFromDB.getSetField()); + // Compare BinaryColumnStatsData + BinaryColumnStatsData binaryDataFromDB = dataFromDB.getBinaryStats(); + Assert.assertEquals(binaryData.getMaxColLen(), binaryDataFromDB.getMaxColLen()); + Assert.assertEquals(binaryData.getAvgColLen(), binaryDataFromDB.getAvgColLen(), 0.01); + Assert.assertEquals(binaryData.getNumNulls(), binaryDataFromDB.getNumNulls()); + } + + @Test + public void decimalPartitionStatistics() throws Exception { + createMockTableAndPartition(DECIMAL_TYPE, DECIMAL_VAL); + // Add partition stats for: DECIMAL_COL and partition: {PART_KEY, DECIMAL_VAL} to DB + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. 
+ ColumnStatistics stats = new ColumnStatistics(); + // Get a default ColumnStatisticsDesc for partition level stats + ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DECIMAL_VAL); + stats.setStatsDesc(desc); + // Get one of the pre-created ColumnStatisticsObj + ColumnStatisticsObj obj = decimalColStatsObjs.get(0); + DecimalColumnStatsData decimalData = obj.getStatsData().getDecimalStats(); + // Add to DB + stats.addToStatsObj(obj); + List parVals = new ArrayList(); + parVals.add(DECIMAL_VAL); + store.updatePartitionColumnStatistics(stats, parVals); + // Get from DB + List partNames = new ArrayList(); + partNames.add(desc.getPartName()); + List colNames = new ArrayList(); + colNames.add(obj.getColName()); + List statsFromDB = store.getPartitionColumnStatistics(DB, TBL, partNames, colNames); + // Compare ColumnStatisticsDesc + Assert.assertEquals(1, statsFromDB.size()); + Assert.assertEquals(desc.getLastAnalyzed(), statsFromDB.get(0).getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(DB, statsFromDB.get(0).getStatsDesc().getDbName()); + Assert.assertEquals(TBL, statsFromDB.get(0).getStatsDesc().getTableName()); + Assert.assertFalse(statsFromDB.get(0).getStatsDesc().isIsTblLevel()); + // Compare ColumnStatisticsObj + Assert.assertEquals(1, statsFromDB.get(0).getStatsObjSize()); + ColumnStatisticsObj objFromDB = statsFromDB.get(0).getStatsObj().get(0); + ColumnStatisticsData dataFromDB = objFromDB.getStatsData(); + // Compare ColumnStatisticsData + Assert.assertEquals(ColumnStatisticsData._Fields.DECIMAL_STATS, dataFromDB.getSetField()); + // Compare DecimalColumnStatsData + DecimalColumnStatsData decimalDataFromDB = dataFromDB.getDecimalStats(); + Assert.assertEquals(decimalData.getHighValue(), decimalDataFromDB.getHighValue()); + Assert.assertEquals(decimalData.getLowValue(), decimalDataFromDB.getLowValue()); + Assert.assertEquals(decimalData.getNumNulls(), decimalDataFromDB.getNumNulls()); + Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs()); + } + + private Table createMockTableAndPartition(String partType, String partVal) throws Exception { + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", partType, "")); + List vals = new ArrayList(); + vals.add(partVal); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + Map params = new HashMap(); + params.put("key", "value"); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, + serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); + int currentTime = (int)(System.currentTimeMillis() / 1000); + Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, + emptyParameters, null, null, null); + store.createTable(table); + Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, + emptyParameters); + store.addPartition(part); + return table; + } + /** + * Returns a dummy table level ColumnStatisticsDesc with default values + */ + private ColumnStatisticsDesc getMockTblColStatsDesc() { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(DEFAULT_TIME); + desc.setDbName(DB); + desc.setTableName(TBL); + desc.setIsTblLevel(true); + return desc; + } + + /** + * Returns a dummy partition level ColumnStatisticsDesc + */ + private ColumnStatisticsDesc getMockPartColStatsDesc(String partKey, String partVal) { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(DEFAULT_TIME); + desc.setDbName(DB); + desc.setTableName(TBL); + 
// part1=val1 + desc.setPartName(partKey + PART_KV_SEPARATOR + partVal); + desc.setIsTblLevel(false); + return desc; + } + +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java new file mode 100644 index 0000000..7ccfdb4 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java @@ -0,0 +1,378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; 
+import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * + */ +public class TestHBaseStoreCached { + private static final Log LOG = LogFactory.getLog(TestHBaseStoreCached.class.getName()); + static Map emptyParameters = new HashMap(); + + @Rule public ExpectedException thrown = ExpectedException.none(); + @Mock HTableInterface htable; + SortedMap rows = new TreeMap(); + HBaseStore store; + + @Before + public void init() throws IOException { + MockitoAnnotations.initMocks(this); + HiveConf conf = new HiveConf(); + store = MockUtils.init(conf, htable, rows); + } + + @Test + public void createTable() throws Exception { + String tableName = "mytable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + Assert.assertEquals(tableName, t.getTableName()); + } + + @Test + public void alterTable() throws Exception { + String tableName = "alttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + startTime += 10; + table.setLastAccessTime(startTime); + store.alterTable("default", tableName, table); + + Table t = store.getTable("default", tableName); + Assert.assertEquals(1, t.getSd().getColsSize()); + Assert.assertEquals("col1", t.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", t.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", t.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp", t.getSd().getLocation()); + Assert.assertEquals("input", t.getSd().getInputFormat()); + Assert.assertEquals("output", t.getSd().getOutputFormat()); + Assert.assertEquals("me", t.getOwner()); + Assert.assertEquals("default", t.getDbName()); + 
Assert.assertEquals(tableName, t.getTableName()); + Assert.assertEquals(startTime, t.getLastAccessTime()); + } + + @Test + public void dropTable() throws Exception { + String tableName = "dtable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, null); + store.createTable(table); + + Table t = store.getTable("default", tableName); + Assert.assertNotNull(t); + + store.dropTable("default", tableName); + Assert.assertNull(store.getTable("default", tableName)); + } + + @Test + public void createPartition() throws Exception { + String dbName = "default"; + String tableName = "myparttable"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(dbName, tableName, vals); + Assert.assertEquals(1, p.getSd().getColsSize()); + Assert.assertEquals("col1", p.getSd().getCols().get(0).getName()); + Assert.assertEquals("int", p.getSd().getCols().get(0).getType()); + Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment()); + Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName()); + Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib()); + Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation()); + Assert.assertEquals("input", p.getSd().getInputFormat()); + Assert.assertEquals("output", p.getSd().getOutputFormat()); + Assert.assertEquals(dbName, p.getDbName()); + Assert.assertEquals(tableName, p.getTableName()); + Assert.assertEquals(1, p.getValuesSize()); + Assert.assertEquals("fred", p.getValues().get(0)); + } + + @Test + public void getPartitions() throws Exception { + String dbName = "default"; + String tableName = "manyParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); + for (String val : partVals) { + 
List vals = new ArrayList(); + vals.add(val); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + val); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Partition p = store.getPartition(dbName, tableName, vals); + Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation()); + } + + List parts = store.getPartitions(dbName, tableName, -1); + Assert.assertEquals(5, parts.size()); + String[] pv = new String[5]; + for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0); + Arrays.sort(pv); + Assert.assertArrayEquals(pv, partVals.toArray(new String[5])); + } + + @Test + public void listGetDropPartitionNames() throws Exception { + String dbName = "default"; + String tableName = "listParts"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + partCols.add(new FieldSchema("region", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; + for (String[] pv : partVals) { + List vals = new ArrayList(); + for (String v : pv) vals.add(v); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]); + Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + } + + List names = store.listPartitionNames(dbName, tableName, (short) -1); + Assert.assertEquals(2, names.size()); + String[] resultNames = names.toArray(new String[names.size()]); + Arrays.sort(resultNames); + Assert.assertArrayEquals(resultNames, new String[]{"pc=today/region=north america", + "pc=tomorrow/region=europe"}); + + List parts = store.getPartitionsByNames(dbName, tableName, names); + Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2])); + Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2])); + + store.dropPartitions(dbName, tableName, names); + List afterDropParts = store.getPartitions(dbName, tableName, -1); + Assert.assertEquals(0, afterDropParts.size()); + } + + + @Test + public void dropPartition() throws Exception { + String dbName = "default"; + String tableName = "myparttable2"; + int startTime = (int)(System.currentTimeMillis() / 1000); + List cols = new ArrayList(); + cols.add(new FieldSchema("col1", "int", "nocomment")); + SerDeInfo serde = new SerDeInfo("serde", "seriallib", null); + StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, + serde, null, null, emptyParameters); + List partCols = new ArrayList(); + partCols.add(new FieldSchema("pc", "string", "")); + Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, + emptyParameters, null, null, null); + store.createTable(table); + + List vals = Arrays.asList("fred"); + StorageDescriptor psd = new StorageDescriptor(sd); + psd.setLocation("file:/tmp/pc=fred"); + Partition part = new 
Partition(vals, dbName, tableName, startTime, startTime, psd, + emptyParameters); + store.addPartition(part); + + Assert.assertNotNull(store.getPartition(dbName, tableName, vals)); + store.dropPartition(dbName, tableName, vals); + thrown.expect(NoSuchObjectException.class); + store.getPartition(dbName, tableName, vals); + } + + // Due to the way our mock stuff works, we can only insert one column at a time, so we'll test + // each stat type separately. We'll test them together in the integration tests. + @Test + public void booleanTableStatistics() throws Exception { + // Because of the way our mock implementation works we actually need to not create the table + // before we set statistics on it. + long now = System.currentTimeMillis(); + String dbname = "default"; + String tableName = "statstable"; + String boolcol = "boolcol"; + long trues = 37; + long falses = 12; + long booleanNulls = 2; + + ColumnStatistics stats = new ColumnStatistics(); + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(); + desc.setLastAnalyzed(now); + desc.setDbName(dbname); + desc.setTableName(tableName); + desc.setIsTblLevel(true); + stats.setStatsDesc(desc); + + ColumnStatisticsObj obj = new ColumnStatisticsObj(); + obj.setColName(boolcol); + obj.setColType("boolean"); + ColumnStatisticsData data = new ColumnStatisticsData(); + BooleanColumnStatsData boolData = new BooleanColumnStatsData(); + boolData.setNumTrues(trues); + boolData.setNumFalses(falses); + boolData.setNumNulls(booleanNulls); + data.setBooleanStats(boolData); + obj.setStatsData(data); + stats.addToStatsObj(obj); + + store.updateTableColumnStatistics(stats); + + stats = store.getTableColumnStatistics(dbname, tableName, Arrays.asList(boolcol)); + Assert.assertEquals(now, stats.getStatsDesc().getLastAnalyzed()); + Assert.assertEquals(dbname, stats.getStatsDesc().getDbName()); + Assert.assertEquals(tableName, stats.getStatsDesc().getTableName()); + Assert.assertTrue(stats.getStatsDesc().isIsTblLevel()); + + Assert.assertEquals(1, stats.getStatsObjSize()); + ColumnStatisticsData colData = stats.getStatsObj().get(0).getStatsData(); + Assert.assertEquals(ColumnStatisticsData._Fields.BOOLEAN_STATS, colData.getSetField()); + boolData = colData.getBooleanStats(); + Assert.assertEquals(trues, boolData.getNumTrues()); + Assert.assertEquals(falses, boolData.getNumFalses()); + Assert.assertEquals(booleanNulls, boolData.getNumNulls()); + } + + +} diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java new file mode 100644 index 0000000..fdfb6d1 --- /dev/null +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + + +/** + * + */ +public class TestSharedStorageDescriptor { + private static final Log LOG = LogFactory.getLog(TestHBaseStore.class.getName()); + + + @Test + public void changeOnSerde() { + StorageDescriptor sd = new StorageDescriptor(); + SerDeInfo serde = new SerDeInfo(); + serde.setName("serde"); + sd.setSerdeInfo(serde); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.getSerdeInfo().setName("different"); + Assert.assertFalse(sd.getSerdeInfo() == ssd.getSerdeInfo()); + Assert.assertEquals("serde", serde.getName()); + Assert.assertEquals("different", ssd.getSerdeInfo().getName()); + Assert.assertEquals("serde", sd.getSerdeInfo().getName()); + } + + @Test + public void changeOnSkewed() { + StorageDescriptor sd = new StorageDescriptor(); + SkewedInfo skew = new SkewedInfo(); + sd.setSkewedInfo(skew); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.setSkewedInfo(new SkewedInfo()); + Assert.assertFalse(sd.getSkewedInfo() == ssd.getSkewedInfo()); + } + + @Test + public void changeOnUnset() { + StorageDescriptor sd = new StorageDescriptor(); + SkewedInfo skew = new SkewedInfo(); + sd.setSkewedInfo(skew); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.unsetSkewedInfo(); + Assert.assertFalse(sd.getSkewedInfo() == ssd.getSkewedInfo()); + } + + @Test + public void changeOrder() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToSortCols(new Order("fred", 1)); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.getSortCols().get(0).setOrder(2); + Assert.assertFalse(sd.getSortCols() == ssd.getSortCols()); + Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder()); + Assert.assertEquals(1, sd.getSortCols().get(0).getOrder()); + } + + @Test + public void unsetOrder() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToSortCols(new Order("fred", 1)); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.unsetSortCols(); + Assert.assertFalse(sd.getSortCols() == ssd.getSortCols()); + Assert.assertEquals(0, ssd.getSortColsSize()); + Assert.assertEquals(1, sd.getSortColsSize()); + } + + @Test + public void changeBucketList() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToBucketCols(new String("fred")); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + List list = ssd.getBucketCols(); + list.add(new String("bob")); + Assert.assertFalse(sd.getBucketCols() == ssd.getBucketCols()); + Assert.assertEquals(2, ssd.getBucketColsSize()); + Assert.assertEquals(1, sd.getBucketColsSize()); + } + + @Test + public void addToColList() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToCols(new FieldSchema("fred", "string", "")); + SharedStorageDescriptor ssd = new 
SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.addToCols(new FieldSchema("joe", "int", "")); + Assert.assertFalse(sd.getCols() == ssd.getCols()); + Assert.assertEquals(2, ssd.getColsSize()); + Assert.assertEquals(1, sd.getColsSize()); + } + + @Test + public void colIterator() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToCols(new FieldSchema("fred", "string", "")); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + Iterator iter = ssd.getColsIterator(); + Assert.assertTrue(iter.hasNext()); + Assert.assertEquals("fred", iter.next().getName()); + Assert.assertFalse(sd.getCols() == ssd.getCols()); + } + + @Test + public void setReadOnly() { + StorageDescriptor sd = new StorageDescriptor(); + sd.addToCols(new FieldSchema("fred", "string", "")); + SharedStorageDescriptor ssd = new SharedStorageDescriptor(); + ssd.setShared(sd); + ssd.setReadOnly(); + List cols = ssd.getCols(); + Assert.assertEquals(1, cols.size()); + Assert.assertTrue(sd.getCols() == ssd.getCols()); + } + +} diff --git a/pom.xml b/pom.xml index b55e86a..e249a30 100644 --- a/pom.xml +++ b/pom.xml @@ -126,7 +126,7 @@ 2.6.0 ${basedir}/${hive.path.to.root}/testutils/hadoop 0.98.9-hadoop1 - 0.98.9-hadoop2 + 1.1.1 4.4 4.4 @@ -174,6 +174,7 @@ 2.4.0 2.6.0 3.0.0 + 0.6.0 2.2.4 diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java index 5ea9b6e..2153f0e 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Adjacency implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Adjacency"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java index e621cfa..f864c18 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Graph implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Graph"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java index 1b18aab..a7ec4e4 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Operator implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Operator"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java index 5c5e0f8..2f64123 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Query implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Query"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java index d340d58..5ccceb1 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class QueryPlan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("QueryPlan"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java index 7353933..706e335 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Stage implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Stage"); diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java index 09a2e7f..2d55d7a 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date 
= "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Task implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Task"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 4030075..2ae33ab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -409,6 +409,12 @@ public int compile(String command, boolean resetTaskIds) { getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, HiveSemanticAnalyzerHook.class); + // Flush the metastore cache. This assures that we don't pick up objects from a previous + // query running in this same thread. This has to be done after we get our semantic + // analyzer (this is when the connection to the metastore is made) but before we analyze, + // because at that point we need access to the objects. + Hive.get().getMSC().flushCache(); + // Do semantic analysis and plan generation if (saHooks != null && !saHooks.isEmpty()) { HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 734742c..cd5f984 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -18,36 +18,6 @@ package org.apache.hadoop.hive.ql.exec; -import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.util.StringUtils.stringifyException; - -import java.io.BufferedWriter; -import java.io.DataOutputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Serializable; -import java.io.Writer; -import java.net.URI; -import java.net.URISyntaxException; -import java.sql.SQLException; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; - import com.google.common.collect.Iterables; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; @@ -215,6 +185,36 @@ import org.apache.hive.common.util.ReflectionUtil; import org.stringtemplate.v4.ST; +import java.io.BufferedWriter; +import java.io.DataOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Serializable; +import java.io.Writer; +import java.net.URI; +import java.net.URISyntaxException; +import java.sql.SQLException; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; + +import static 
org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.util.StringUtils.stringifyException; + /** * DDLTask implementation. * @@ -3244,7 +3244,9 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { } } - Table oldTbl = tbl.copy(); + // Don't change the table object returned by the metastore, as we'll mess with its caches. + Table oldTbl = tbl; + tbl = oldTbl.copy(); if (allPartitions != null) { // Alter all partitions for (Partition part : allPartitions) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index c449aee..0b45ce7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -3067,8 +3067,13 @@ public HiveMetaHook getHook( } } }; - return RetryingMetaStoreClient.getProxy(conf, hookLoader, metaCallTimeMap, - SessionHiveMetaStoreClient.class.getName()); + + if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + return new SessionHiveMetaStoreClient(conf, hookLoader); + } else { + return RetryingMetaStoreClient.getProxy(conf, hookLoader, metaCallTimeMap, + SessionHiveMetaStoreClient.class.getName()); + } } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 9546191..9f9b5bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; @@ -46,9 +45,6 @@ import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.OutputFormat; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.transport.TMemoryBuffer; /** * A Hive Table Partition: is a fundamental storage unit within a Table. @@ -95,7 +91,7 @@ public Partition(Table tbl) throws HiveException { org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition(); if (!tbl.isView()) { - tPart.setSd(tbl.getTTable().getSd()); // TODO: get a copy + tPart.setSd(tbl.getTTable().getSd().deepCopy()); } initialize(tbl, tPart); } @@ -140,33 +136,13 @@ public Partition(Table tbl, Map<String, String> partSpec, Path location) throws tpart.setValues(pvals); if (!tbl.isView()) { - tpart.setSd(cloneSd(tbl)); + tpart.setSd(tbl.getSd().deepCopy()); tpart.getSd().setLocation((location != null) ? location.toString() : null); } return tpart; } /** - * We already have methods that clone stuff using XML or Kryo. - * And now for something completely different - let's clone SD using Thrift! - * Refactored into a method. 
- */ - public static StorageDescriptor cloneSd(Table tbl) throws HiveException { - StorageDescriptor sd = new StorageDescriptor(); - try { - // replace with THRIFT-138 - TMemoryBuffer buffer = new TMemoryBuffer(1024); - TBinaryProtocol prot = new TBinaryProtocol(buffer); - tbl.getTTable().getSd().write(prot); - sd.read(prot); - } catch (TException e) { - LOG.error("Could not create a copy of StorageDescription"); - throw new HiveException("Could not create a copy of StorageDescription",e); - } - return sd; - } - - /** * Initializes this object with the given variables * * @param table @@ -563,6 +539,7 @@ public boolean isStoredAsSubDirectories() { } public List getSkewedColNames() { + LOG.debug("sd is " + tPartition.getSd().getClass().getName()); return tPartition.getSd().getSkewedInfo().getSkewedColNames(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index 84f3f76..01fb748 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -453,7 +453,7 @@ public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart, // location or an SD, but these are needed to create a ql.metadata.Partition, // so we use the table's SD. The only place this is used is by the // authorization hooks, so we will not affect code flow in the metastore itself. - wrapperApiPart.setSd(t.getSd()); + wrapperApiPart.setSd(t.getSd().deepCopy()); } initialize(new TableWrapper(t),wrapperApiPart); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 55aea0e..351cb2b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -737,6 +737,9 @@ public static ColStatistics getColStatistics(ColumnStatisticsObj cso, String tab } private static List convertColStats(List colStats, String tabName) { + if (colStats==null) { + return new ArrayList(); + } List stats = new ArrayList(colStats.size()); for (ColumnStatisticsObj statObj : colStats) { ColStatistics cs = getColStatistics(statObj, tabName, statObj.getColName()); diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java index e226528..d6d513d 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java @@ -253,7 +253,7 @@ private void addPartition(HiveMetaStoreClient client, Table table, part.setTableName(table.getTableName()); part.setValues(vals); part.setParameters(new HashMap()); - part.setSd(table.getSd()); + part.setSd(table.getSd().deepCopy()); part.getSd().setSerdeInfo(table.getSd().getSerdeInfo()); part.getSd().setLocation(table.getSd().getLocation() + location); diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q index 8001081..7e94f23 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q @@ -123,6 +123,7 @@ insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t desc 
formatted over1k_part2_orc partition(ds="foo",t=27); desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); +-- SORT_BEFORE_DIFF select * from over1k_part2_orc; select count(*) from over1k_part2_orc; @@ -132,6 +133,7 @@ insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t desc formatted over1k_part2_orc partition(ds="foo",t=27); desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); +-- SORT_BEFORE_DIFF select * from over1k_part2_orc; select count(*) from over1k_part2_orc; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q index f842efe..ea670e9 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q @@ -117,6 +117,7 @@ insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from desc formatted over1k_part2 partition(ds="foo",t=27); desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); +-- SORT_BEFORE_DIFF select * from over1k_part2; select count(*) from over1k_part2; @@ -126,6 +127,7 @@ insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from desc formatted over1k_part2 partition(ds="foo",t=27); desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__"); +-- SORT_BEFORE_DIFF select * from over1k_part2; select count(*) from over1k_part2; diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out index 216a79c..1f6339a 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out @@ -1755,13 +1755,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2_orc +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2_orc +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 @@ -1900,13 +1902,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2_orc +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2_orc +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out index 41049bd..ebf4461 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out @@ 
-1655,13 +1655,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2 +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2 +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 @@ -1800,13 +1802,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2 +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2 +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out index 4451046..f0fc221 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out @@ -1835,13 +1835,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2_orc +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2_orc +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 @@ -1980,13 +1982,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2_orc +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2_orc +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out index cb001b9..8d4c1b7 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out @@ -1735,13 +1735,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2 +PREHOOK: query: -- 
SORT_BEFORE_DIFF +select * from over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2 +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 @@ -1880,13 +1882,15 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from over1k_part2 +PREHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: select * from over1k_part2 +POSTHOOK: query: -- SORT_BEFORE_DIFF +select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm index ae449c5..01745da 100644 --- a/ql/src/test/templates/TestCliDriver.vm +++ b/ql/src/test/templates/TestCliDriver.vm @@ -45,13 +45,14 @@ public class $className extends TestCase { String hiveConfDir = "$hiveConfDir"; String initScript = "$initScript"; String cleanupScript = "$cleanupScript"; + boolean useHBaseMetastore = Boolean.valueOf("$useHBaseMetastore"); try { String hadoopVer = "$hadoopVersion"; if (!hiveConfDir.isEmpty()) { hiveConfDir = HIVE_ROOT + hiveConfDir; } qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, - hiveConfDir, hadoopVer, initScript, cleanupScript); + hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore); // do a one time initialization qt.cleanUp(); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java index db325af..eed53fa 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class InnerStruct implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InnerStruct"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java index 1232ff9..4410307 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") 
public class ThriftTestObj implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftTestObj"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java index 4d2f5bf..59a1f7e 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class Complex implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Complex"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java index 23d7363..901fc4b 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class IntString implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IntString"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java index 9447708..cc3f375 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class MegaStruct implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MegaStruct"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java index d8c46f4..e7498f4 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class MiniStruct implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MiniStruct"); diff --git a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java index 58498b0..a2cbda2 100644 --- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java +++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class SetIntString implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetIntString"); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java new file mode 100644 index 0000000..ec43ae3 --- /dev/null +++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hive.serde2.binarysortable;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.ByteStream.Output;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+public class BinarySortableSerDeWithEndPrefix extends BinarySortableSerDe {
+  public static void serializeStruct(Output byteStream, Object[] fieldData,
+      List<ObjectInspector> fieldOis, boolean endPrefix) throws SerDeException {
+    for (int i = 0; i < fieldData.length; i++) {
+      serialize(byteStream, fieldData[i], fieldOis.get(i), false);
+    }
+    if (endPrefix) {
+      if (fieldData[fieldData.length-1]!=null) {
+        byteStream.getData()[byteStream.getLength()-1]++;
+      } else {
+        byteStream.getData()[byteStream.getLength()-1]+=2;
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java index d4b6972..7396d02 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HiveClusterStatus implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveClusterStatus"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java index 760c81e..e15a9e0 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class HiveServerException extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveServerException"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java index df793b1..2a7fd9b 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class ThriftHive { public interface Iface extends
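BinarySortableSerDeWithEndPrefix, introduced a few hunks above, reuses BinarySortableSerDe's order-preserving encoding and, when endPrefix is set, bumps the final byte of the encoding (by one for a non-null last field, by two for a null one) so the result sorts after every key that shares the prefix. A sketch of how a caller might build an inclusive-start / exclusive-end key pair for a prefix scan; the class name, the single string field value, and the use of javaStringObjectInspector are assumptions for illustration, not taken from the patch.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.serde2.ByteStream.Output;
    import org.apache.hadoop.hive.serde2.SerDeException;
    import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDeWithEndPrefix;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class EndPrefixScanKeys {
      public static void main(String[] args) throws SerDeException {
        Object[] fieldData = new Object[] { "web_sales" };  // example prefix value
        List<ObjectInspector> fieldOis = Arrays.<ObjectInspector>asList(
            PrimitiveObjectInspectorFactory.javaStringObjectInspector);

        // Inclusive lower bound: plain binary-sortable encoding of the prefix.
        Output start = new Output();
        BinarySortableSerDeWithEndPrefix.serializeStruct(start, fieldData, fieldOis, false);

        // Exclusive upper bound: same encoding with the last byte bumped.
        Output end = new Output();
        BinarySortableSerDeWithEndPrefix.serializeStruct(end, fieldData, fieldOis, true);

        byte[] startKey = Arrays.copyOf(start.getData(), start.getLength());
        byte[] endKey = Arrays.copyOf(end.getData(), end.getLength());
        System.out.println(startKey.length + " byte start key, " + endKey.length + " byte end key");
      }
    }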
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface { diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java index 5625516..841139b 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TArrayTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TArrayTypeEntry"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java index 202399a..bfea569 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TBinaryColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBinaryColumn"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java index 921e9de..5c10fde 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TBoolColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBoolColumn"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolValue.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolValue.java index 201c9fb..86b5ce3 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolValue.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolValue.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") 
public class TBoolValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBoolValue"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java index cd9b6da..3d42927 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TByteColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteColumn"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java index 42b5bd5..04f8e7c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteValue.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TByteValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteValue"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java index 6bdd53d..2630215 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCLIService.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCLIService { public interface Iface { diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java index 1097869..cdabe7d 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") 
public class TCancelDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java index 5469108..f821459 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCancelDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationReq.java index 83d191e..e63145a 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCancelOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationResp.java index b8d96df..56c9e76 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCancelOperationResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCancelOperationResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationReq.java index ca68866..6ad5446 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationReq.java +++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCloseOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationResp.java index 092e7e4..3cd3643 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseOperationResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCloseOperationResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionReq.java index a5d910f..7bca565 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCloseSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionResp.java index ae4b554..2ee0551 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TCloseSessionResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TCloseSessionResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionResp"); diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TColumnDesc.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TColumnDesc.java index e31aa81..ad2444e 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TColumnDesc.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TColumnDesc.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TColumnDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnDesc"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java index 7236d90..1f3b77e 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TDoubleColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleColumn"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleValue.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleValue.java index 4b0811e..59203b5 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleValue.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleValue.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TDoubleValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleValue"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java index feaed34..ee6ed29 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TExecuteStatementReq implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java index daf7b5c..074023c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TExecuteStatementResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java index 47d9a0b..6893eb9 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TFetchResultsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsResp.java index b9f3ef6..66116ea 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TFetchResultsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java index 0e63b9a..ad7ffa5 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java @@ -34,7 +34,7 @@ import 
org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetCatalogsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java index 7067ff7..651b1b0 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetCatalogsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsReq.java index a153968..a883ab8 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetColumnsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsResp.java index 49ecb98..0503062 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetColumnsResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetColumnsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java index 
2c2fa0f..5778ea0 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java index 4222005..dc8ef44 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java index 5bc0540..8fd9690 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetFunctionsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java index a64c948..f24183e 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetFunctionsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static 
final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoReq.java index 7dd6a14..fac38c8 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoResp.java index 3e16318..c54b6a9 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetInfoResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java index f4d5fae..4cc87d7 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetOperationStatusReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java index 897f7f5..b77148c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = 
"Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetOperationStatusResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java index 405ffc6..c69bbed 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetResultSetMetadataReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java index 3735310..d308d4c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetResultSetMetadataResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasReq.java index 33088b4..9f45078 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetSchemasReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetSchemasReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasResp.java index 
ec598a3..6e85540 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetSchemasResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetSchemasResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetSchemasResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java index 3e09a64..8321ce1 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTableTypesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java index cdba9b5..d7d9dc3 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTableTypesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java index 805e69f..d9e9e40 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTablesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TGetTablesReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesResp.java index 0d03dd8..65513a0 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTablesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTablesResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java index 0628e21..47b8b38 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTypeInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java index 6058826..1ef8dc5 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TGetTypeInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/THandleIdentifier.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/THandleIdentifier.java index f20c4d0..fec1b78 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/THandleIdentifier.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/THandleIdentifier.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by 
Thrift Compiler (0.9.2)", date = "2015-8-17") public class THandleIdentifier implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THandleIdentifier"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java index 267d109..2634ef9 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI16Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Column"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Value.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Value.java index ddaf9ef..afdc29f 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Value.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Value.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI16Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Value"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java index c79df4f..cd59dc3 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI32Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Column"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Value.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Value.java index d521373..2886d4c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Value.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Value.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) 
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI32Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Value"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java index 746ade2..fc28197 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI64Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Column"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Value.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Value.java index b419b80..c628896 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Value.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Value.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TI64Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Value"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TMapTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TMapTypeEntry.java index 91a85ef..7a43c4d 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TMapTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TMapTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TMapTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMapTypeEntry"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionReq.java index 657f868..a2f6530 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionReq.java +++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TOpenSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionResp.java index 48f4b45..607847c 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOpenSessionResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TOpenSessionResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOperationHandle.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOperationHandle.java index db41117..45a53f6 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOperationHandle.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TOperationHandle.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TOperationHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOperationHandle"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java index f1c8d58..6f246c1 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TPrimitiveTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPrimitiveTypeEntry"); diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java index 91f8b00..c7708e5 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TRenewDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenReq"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java index c01cc3f..38cc331 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TRenewDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenResp"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java index 197bab6..bbab399 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TRow implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRow"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java index cc1a148..dc93ff9 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public 
class TRowSet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowSet"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TSessionHandle.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TSessionHandle.java index 264e155..4ab6a3e 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TSessionHandle.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TSessionHandle.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TSessionHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSessionHandle"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java index 1cd7980..1ce3ac7 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TStatus implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStatus"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java index d996529..6883c1a 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TStringColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringColumn"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringValue.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringValue.java index 9655f38..2378060 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringValue.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringValue.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by 
Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TStringValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringValue"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStructTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStructTypeEntry.java index d58184c..828b43a 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStructTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStructTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TStructTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStructTypeEntry"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java index 796b73b..f2ef9a4 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TTableSchema implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableSchema"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java index 95b4466..9aa071d 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TTypeDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeDesc"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeQualifiers.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeQualifiers.java index 15ac5a9..9480984 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeQualifiers.java +++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeQualifiers.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TTypeQualifiers implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeQualifiers"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java index eccf303..8ff0766 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TUnionTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TUnionTypeEntry"); diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java index 21da61b..7ccc1e8 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java @@ -34,7 +34,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-17") public class TUserDefinedTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TUserDefinedTypeEntry"); diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote index 747c2be..54d59a8 100755 --- a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote +++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote @@ -157,6 +157,11 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' NotificationEventResponse get_next_notification(NotificationEventRequest rqst)') print(' CurrentNotificationEventId get_current_notificationEventId()') print(' FireEventResponse fire_listener_event(FireEventRequest rqst)') + print(' void flushCache()') + print(' GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req)') + print(' GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req)') + print(' PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)') + print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') print(' string getName()') print(' 
string getVersion()') print(' fb_status getStatus()') @@ -166,8 +171,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' void setOption(string key, string value)') print(' string getOption(string key)') print(' getOptions()') + print(' string getCpuProfile(i32 profileDurationInSec)') print(' i64 aliveSince()') - print(' reflection_limited.Service getLimitedReflection()') print(' void reinitialize()') print(' void shutdown()') print('') @@ -1024,6 +1029,36 @@ elif cmd == 'fire_listener_event': sys.exit(1) pp.pprint(client.fire_listener_event(eval(args[0]),)) +elif cmd == 'flushCache': + if len(args) != 0: + print('flushCache requires 0 args') + sys.exit(1) + pp.pprint(client.flushCache()) + +elif cmd == 'get_file_metadata_by_expr': + if len(args) != 1: + print('get_file_metadata_by_expr requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata_by_expr(eval(args[0]),)) + +elif cmd == 'get_file_metadata': + if len(args) != 1: + print('get_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata(eval(args[0]),)) + +elif cmd == 'put_file_metadata': + if len(args) != 1: + print('put_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.put_file_metadata(eval(args[0]),)) + +elif cmd == 'clear_file_metadata': + if len(args) != 1: + print('clear_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.clear_file_metadata(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') @@ -1078,18 +1113,18 @@ elif cmd == 'getOptions': sys.exit(1) pp.pprint(client.getOptions()) +elif cmd == 'getCpuProfile': + if len(args) != 1: + print('getCpuProfile requires 1 args') + sys.exit(1) + pp.pprint(client.getCpuProfile(eval(args[0]),)) + elif cmd == 'aliveSince': if len(args) != 0: print('aliveSince requires 0 args') sys.exit(1) pp.pprint(client.aliveSince()) -elif cmd == 'getLimitedReflection': - if len(args) != 0: - print('getLimitedReflection requires 0 args') - sys.exit(1) - pp.pprint(client.getLimitedReflection()) - elif cmd == 'reinitialize': if len(args) != 0: print('reinitialize requires 0 args')